Merge branch 'cassandra-4.1' into trunk
diff --git a/.circleci/config-2_1.yml.high_res.patch b/.circleci/config-2_1.yml.high_res.patch
index 56e1ca2..36fabbd 100644
--- a/.circleci/config-2_1.yml.high_res.patch
+++ b/.circleci/config-2_1.yml.high_res.patch
@@ -1,5 +1,5 @@
---- config-2_1.yml	2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.HIGHRES	2022-05-30 12:06:59.000000000 -0400
+--- config-2_1.yml	2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.HIGHRES	2022-05-30 12:10:16.000000000 -0400
 @@ -105,14 +105,14 @@
  j8_par_executor: &j8_par_executor
    executor:
diff --git a/.circleci/config-2_1.yml.mid_res.patch b/.circleci/config-2_1.yml.mid_res.patch
index 5a14e34..90995a7 100644
--- a/.circleci/config-2_1.yml.mid_res.patch
+++ b/.circleci/config-2_1.yml.mid_res.patch
@@ -1,5 +1,5 @@
---- config-2_1.yml	2022-05-30 12:06:34.000000000 -0400
-+++ config-2_1.yml.MIDRES	2022-05-30 12:06:52.000000000 -0400
+--- config-2_1.yml	2022-05-30 12:09:35.000000000 -0400
++++ config-2_1.yml.MIDRES	2022-05-30 12:10:10.000000000 -0400
 @@ -105,14 +105,14 @@
  j8_par_executor: &j8_par_executor
    executor:
diff --git a/CHANGES.txt b/CHANGES.txt
index 5f02fd3..7c9137f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,25 @@
-4.1-alpha2
+4.2
+ * When doing a host replacement, -Dcassandra.broadcast_interval_ms is used to know when to check the ring but checks that the ring wasn't changed in -Dcassandra.ring_delay_ms, changes to ring delay should not depend on when we publish load stats (CASSANDRA-17776)
+ * When bootstrap fails, CassandraRoleManager may attempt to do read queries that fail with "Cannot read from a bootstrapping node", and increments unavailables counters (CASSANDRA-17754)
+ * Add guardrail to disallow DROP KEYSPACE commands (CASSANDRA-17767)
+ * Remove ephemeral snapshot marker file and introduce a flag to SnapshotManifest (CASSANDRA-16911)
+ * Add a virtual table that exposes currently running queries (CASSANDRA-15241)
+ * Allow sstableloader to specify table without relying on path (CASSANDRA-16584)
+ * Fix TestGossipingPropertyFileSnitch.test_prefer_local_reconnect_on_listen_address (CASSANDRA-17700)
+ * Add ByteComparable API (CASSANDRA-6936)
+ * Add guardrail for maximum replication factor (CASSANDRA-17500)
+ * Increment CQLSH to version 6.2.0 for release 4.2 (CASSANDRA-17646)
+ * Adding support to perform certificate based internode authentication (CASSANDRA-17661)
+ * Option to disable CDC writes of repaired data (CASSANDRA-17666)
+ * When a node is bootstrapping it gets the whole gossip state but applies in random order causing some cases where StorageService will fail causing an instance to not show up in TokenMetadata (CASSANDRA-17676)
+ * Add CQLSH command SHOW REPLICAS (CASSANDRA-17577)
+ * Add guardrail to allow disabling of SimpleStrategy (CASSANDRA-17647)
+ * Change default directory permission to 750 in packaging (CASSANDRA-17470)
+ * Adding support for TLS client authentication for internode communication (CASSANDRA-17513)
+ * Add new CQL function maxWritetime (CASSANDRA-17425)
+ * Add guardrail for ALTER TABLE ADD / DROP / REMOVE column operations (CASSANDRA-17495)
+ * Rename DisableFlag class to EnableFlag on guardrails (CASSANDRA-17544)
+Merged from 4.1:
  * Fix sstable_preemptive_open_interval disabled value. sstable_preemptive_open_interval = null backward compatible with
    sstable_preemptive_open_interval_in_mb = -1 (CASSANDRA-17737)
  * Remove usages of Path#toFile() in the snapshot apparatus (CASSANDRA-17769)
@@ -24,9 +45,9 @@
    (CASSANDRA-17737)
  * Clean up ScheduledExecutors, CommitLog, and MessagingService shutdown for in-JVM dtests (CASSANDRA-17731)
  * Remove extra write to system table for prepared statements (CASSANDRA-17764)
-Merged from 3.11:
+Merged from 3.11:
  * Creating of a keyspace on insufficient number of replicas should filter out gosspping-only members (CASSANDRA-17759)
-Merged from 3.0:
+Merged from 3.0:
  * Fix restarting of services on gossipping-only member (CASSANDRA-17752)
 
 
@@ -51,6 +72,13 @@
  * Fix repair_request_timeout_in_ms and remove paxos_auto_repair_threshold_mb (CASSANDRA-17557)
  * Incremental repair leaks SomeRepairFailedException after switch away from flatMap (CASSANDRA-17620)
  * StorageService read threshold get methods throw NullPointerException due to not handling null configs (CASSANDRA-17593)
+Merged from 4.0:
+ * Ensure FileStreamTask cannot compromise shared channel proxy for system table when interrupted (CASSANDRA-17663)
+Merged from 3.11:
+Merged from 3.0:
+
+
+4.1
  * Rename truncate_drop guardrail to drop_truncate_table (CASSANDRA-17592)
  * nodetool enablefullquerylog can NPE when directory has no files (CASSANDRA-17595)
  * Add auto_snapshot_ttl configuration (CASSANDRA-16790)
@@ -214,6 +242,7 @@
  * GossiperTest.testHasVersion3Nodes didn't take into account trunk version changes, fixed to rely on latest version (CASSANDRA-16651)
  * Update JNA library to 5.9.0 and snappy-java to version 1.1.8.4 (CASSANDRA-17040)
 Merged from 4.0:
+ * silence benign SslClosedEngineException (CASSANDRA-17565)
 Merged from 3.11:
 Merged from 3.0:
  * Fix issue where frozen maps may not be serialized in the correct order (CASSANDRA-17623)
diff --git a/NEWS.txt b/NEWS.txt
index 1945f61..fa12563 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -51,6 +51,39 @@
 'sstableloader' tool. You can upgrade the file format of your snapshots
 using the provided 'sstableupgrade' tool.
 
+
+4.2
+===
+
+New features
+------------
+    - Added a new configuration cdc_on_repair_enabled to toggle whether CDC mutations are replayed through the
+      write path on streaming, e.g. repair. When enabled, CDC data streamed to the destination node will be written into
+      commit log first. When disabled, the streamed CDC data is written into SSTables just the same as normal streaming.
+      If this is set to false, streaming will be considerably faster however it's possible that, in extreme situations
+      (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+      The default is true/enabled. The configuration can be altered via JMX.
+    - Added a new CQL function, maxwritetime. It shows the largest unix timestamp that the data was written, similar to
+      its sibling CQL function, writetime. Unlike writetime, maxwritetime can be applied to multi-cell data types, e.g.
+      non-frozen collections and UDT, and returns the largest timestamp. One should not use it when upgrading to 4.2.
+    - New Guardrails added:
+      - Whether ALTER TABLE commands are allowed to mutate columns
+      - Whether SimpleStrategy is allowed on keyspace creation or alteration
+      - Maximum replication factor
+      - Whether DROP KEYSPACE commands are allowed.
+    - It is possible to list ephemeral snapshots with the nodetool listsnapshots command when the flag "-e" is specified.
+
+Upgrading
+---------
+    - Ephemeral marker files for snapshots done by repairs are not created anymore;
+      there is a dedicated flag in the snapshot manifest instead. On upgrade of a node to version 4.2, at node start, any
+      such ephemeral snapshots found on disk will be deleted (same behaviour as before), and new ephemeral snapshots
+      will no longer create ephemeral marker files, as the flag in the snapshot manifest is used instead.
+
+Deprecation
+-----------
+
+
 4.1
 ===
 
diff --git a/README.asc b/README.asc
index f484aa2..cba3a2b 100644
--- a/README.asc
+++ b/README.asc
@@ -39,7 +39,7 @@
 
 ----
 Connected to Test Cluster at localhost:9160.
-[cqlsh 6.0.0 | Cassandra 4.1 | CQL spec 3.4.6 | Native protocol v5]
+[cqlsh 6.2.0 | Cassandra 4.2-SNAPSHOT | CQL spec 3.4.6 | Native protocol v5]
 Use HELP for help.
 cqlsh>
 ----
diff --git a/bin/cqlsh.py b/bin/cqlsh.py
index 637c95e..e47bc59 100755
--- a/bin/cqlsh.py
+++ b/bin/cqlsh.py
@@ -47,7 +47,7 @@
 UTF8 = 'utf-8'
 
 description = "CQL Shell for Apache Cassandra"
-version = "6.1.0"
+version = "6.2.0"
 
 readline = None
 try:
@@ -600,6 +600,13 @@
     def show_session(self, sessionid, partial_session=False):
         print_trace_session(self, self.session, sessionid, partial_session)
 
+    def show_replicas(self, token_value, keyspace=None):
+        ks = self.current_keyspace if keyspace is None else keyspace
+        token_map = self.conn.metadata.token_map
+        nodes = token_map.get_replicas(ks, token_map.token_class(token_value))
+        addresses = [x.address for x in nodes]
+        print(f"{addresses}")
+
     def get_connection_versions(self):
         result, = self.session.execute("select * from system.local where key = 'local'")
         vers = {
@@ -979,7 +986,7 @@
         if parsed:
             self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
         else:
-            self.printerr('Improper %s command.' % cmdword)
+            self.printerr(f'Improper {cmdword} command.')
 
     def do_use(self, parsed):
         ksname = parsed.get_binding('ksname')
@@ -1578,6 +1585,11 @@
         SHOW SESSION <sessionid>
 
           Pretty-prints the requested tracing session.
+
+        SHOW REPLICAS <token> (<keyspace>)
+
+          Lists the replica nodes by IP address for the given token. The current
+          keyspace is used if one is not specified.
         """
         showwhat = parsed.get_binding('what').lower()
         if showwhat == 'version':
@@ -1588,6 +1600,10 @@
         elif showwhat.startswith('session'):
             session_id = parsed.get_binding('sessionid').lower()
             self.show_session(UUID(session_id))
+        elif showwhat.startswith('replicas'):
+            token_id = parsed.get_binding('token')
+            keyspace = parsed.get_binding('keyspace')
+            self.show_replicas(token_id, keyspace)
         else:
             self.printerr('Wait, how do I show %r?' % (showwhat,))
 
diff --git a/build.xml b/build.xml
index ff1a8d3..ca346c9 100644
--- a/build.xml
+++ b/build.xml
@@ -33,7 +33,7 @@
     <property name="debuglevel" value="source,lines,vars"/>
 
     <!-- default version and SCM information -->
-    <property name="base.version" value="4.1-alpha2"/>
+    <property name="base.version" value="4.2"/>
     <property name="scm.connection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.developerConnection" value="scm:https://gitbox.apache.org/repos/asf/cassandra.git"/>
     <property name="scm.url" value="https://gitbox.apache.org/repos/asf?p=cassandra.git;a=tree"/>
@@ -1658,9 +1658,27 @@
     ant testsome -Dtest.name=org.apache.cassandra.service.StorageServiceServerTest -Dtest.methods=testRegularMode,testGetAllRangesEmpty
   -->
   <target name="testsome" depends="build-test" description="Execute specific unit tests" >
+    <condition property="withoutMethods">
+      <and>
+        <equals arg1="${test.methods}" arg2=""/>
+        <not>
+          <contains string="${test.name}" substring="*"/>
+        </not>
+      </and>
+    </condition>
+    <condition property="withMethods">
+      <and>
+        <not>
+         <equals arg1="${test.methods}" arg2=""/>
+        </not>
+        <not>
+          <contains string="${test.name}" substring="*"/>
+        </not>
+      </and>
+    </condition>
     <testmacro inputdir="${test.unit.src}" timeout="${test.timeout}">
-      <test unless:blank="${test.methods}" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
-      <test if:blank="${test.methods}" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
+      <test if="withMethods" name="${test.name}" methods="${test.methods}" outfile="build/test/output/TEST-${test.name}-${test.methods}"/>
+      <test if="withoutMethods" name="${test.name}" outfile="build/test/output/TEST-${test.name}"/>
       <jvmarg value="-Dlegacy-sstable-root=${test.data}/legacy-sstables"/>
       <jvmarg value="-Dinvalid-legacy-sstable-root=${test.data}/invalid-legacy-sstables"/>
       <jvmarg value="-Dcassandra.ring_delay_ms=1000"/>
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
index b2ddde8..dd388f2 100644
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@ -298,6 +298,18 @@
 # containing a CDC-enabled table if at space limit in cdc_raw_directory).
 cdc_enabled: false
 
+# Specify whether writes to the CDC-enabled tables should be blocked when CDC data on disk has reached the limit.
+# When setting to false, the writes will not be blocked and the oldest CDC data on disk will be deleted to
+# ensure the size constraint. The default is true.
+# cdc_block_writes: true
+
+# Specify whether CDC mutations are replayed through the write path on streaming, e.g. repair.
+# When enabled, CDC data streamed to the destination node will be written into commit log first. When setting to false,
+# the streamed CDC data is written into SSTables just the same as normal streaming. The default is true.
+# If this is set to false, streaming will be considerably faster however it's possible that, in extreme situations
+# (losing > quorum # nodes in a replica set), you may have data in your SSTables that never makes it to the CDC log.
+# cdc_on_repair_enabled: true
+
 # CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the
 # segment contains mutations for a CDC-enabled table. This should be placed on a
 # separate spindle than the data directories. If not set, the default directory is
@@ -1318,6 +1330,12 @@
   # Set to a valid keystore if internode_encryption is dc, rack or all
   keystore: conf/.keystore
   keystore_password: cassandra
+  # During internode mTLS authentication, inbound connections (acting as servers) use keystore, keystore_password
+  # containing server certificate to create SSLContext and
+  # outbound connections (acting as clients) use outbound_keystore & outbound_keystore_password with client certificates
+  # to create SSLContext. By default, outbound_keystore is the same as keystore indicating mTLS is not enabled.
+#  outbound_keystore: conf/.keystore
+#  outbound_keystore_password: cassandra
   # Verify peer server certificates
   require_client_auth: false
   # Set to a valid trustore if require_client_auth is true
@@ -1667,58 +1685,78 @@
 # The two thresholds default to -1 to disable.
 # keyspaces_warn_threshold: -1
 # keyspaces_fail_threshold: -1
+#
 # Guardrail to warn or fail when creating more user tables than threshold.
 # The two thresholds default to -1 to disable.
 # tables_warn_threshold: -1
 # tables_fail_threshold: -1
+#
 # Guardrail to enable or disable the ability to create uncompressed tables
 # uncompressed_tables_enabled: true
+#
 # Guardrail to warn or fail when creating/altering a table with more columns per table than threshold.
 # The two thresholds default to -1 to disable.
 # columns_per_table_warn_threshold: -1
 # columns_per_table_fail_threshold: -1
+#
 # Guardrail to warn or fail when creating more secondary indexes per table than threshold.
 # The two thresholds default to -1 to disable.
 # secondary_indexes_per_table_warn_threshold: -1
 # secondary_indexes_per_table_fail_threshold: -1
+#
 # Guardrail to enable or disable the creation of secondary indexes
 # secondary_indexes_enabled: true
+#
 # Guardrail to warn or fail when creating more materialized views per table than threshold.
 # The two thresholds default to -1 to disable.
 # materialized_views_per_table_warn_threshold: -1
 # materialized_views_per_table_fail_threshold: -1
+#
 # Guardrail to warn about, ignore or reject properties when creating tables. By default all properties are allowed.
 # table_properties_warned: []
 # table_properties_ignored: []
 # table_properties_disallowed: []
+#
 # Guardrail to allow/disallow user-provided timestamps. Defaults to true.
 # user_timestamps_enabled: true
+#
 # Guardrail to allow/disallow GROUP BY functionality.
 # group_by_enabled: true
+#
 # Guardrail to allow/disallow TRUNCATE and DROP TABLE statements
 # drop_truncate_table_enabled: true
+#
+# Guardrail to allow/disallow DROP KEYSPACE statements
+# drop_keyspace_enabled: true
+#
 # Guardrail to warn or fail when using a page size greater than threshold.
 # The two thresholds default to -1 to disable.
 # page_size_warn_threshold: -1
 # page_size_fail_threshold: -1
+#
 # Guardrail to allow/disallow list operations that require read before write, i.e. setting list element by index and
 # removing list elements by either index or value. Defaults to true.
 # read_before_write_list_operations_enabled: true
+#
 # Guardrail to warn or fail when querying with an IN restriction selecting more partition keys than threshold.
 # The two thresholds default to -1 to disable.
 # partition_keys_in_select_warn_threshold: -1
 # partition_keys_in_select_fail_threshold: -1
+#
 # Guardrail to warn or fail when an IN query creates a cartesian product with a size exceeding threshold,
 # eg. "a in (1,2,...10) and b in (1,2...10)" results in cartesian product of 100.
 # The two thresholds default to -1 to disable.
 # in_select_cartesian_product_warn_threshold: -1
 # in_select_cartesian_product_fail_threshold: -1
+#
 # Guardrail to warn about or reject read consistency levels. By default, all consistency levels are allowed.
 # read_consistency_levels_warned: []
 # read_consistency_levels_disallowed: []
+#
 # Guardrail to warn about or reject write consistency levels. By default, all consistency levels are allowed.
 # write_consistency_levels_warned: []
 # write_consistency_levels_disallowed: []
+#
 # Guardrail to warn or fail when encountering larger size of collection data than threshold.
 # At query time this guardrail is applied only to the collection fragment that is being writen, even though in the case
 # of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
@@ -1729,6 +1767,7 @@
 # collection_size_warn_threshold:
 # Min unit: B
 # collection_size_fail_threshold:
+#
 # Guardrail to warn or fail when encountering more elements in collection than threshold.
 # At query time this guardrail is applied only to the collection fragment that is being writen, even though in the case
 # of non-frozen collections there could be unaccounted parts of the collection on the sstables. This is done this way to
@@ -1737,12 +1776,21 @@
 # The two thresholds default to -1 to disable.
 # items_per_collection_warn_threshold: -1
 # items_per_collection_fail_threshold: -1
+#
 # Guardrail to allow/disallow querying with ALLOW FILTERING. Defaults to true.
 # allow_filtering_enabled: true
+#
+# Guardrail to allow/disallow setting SimpleStrategy via keyspace creation or alteration. Defaults to true.
+# simplestrategy_enabled: true
+#
 # Guardrail to warn or fail when creating a user-defined-type with more fields in than threshold.
 # Default -1 to disable.
 # fields_per_udt_warn_threshold: -1
 # fields_per_udt_fail_threshold: -1
+#
+# Guardrail to indicate whether or not users are allowed to use ALTER TABLE commands to make column changes to tables
+# alter_table_enabled: true
+#
 # Guardrail to warn or fail when local data disk usage percentage exceeds threshold. Valid values are in [1, 100].
 # This is only used for the disks storing data directories, so it won't count any separate disks used for storing
 # the commitlog, hints nor saved caches. The disk usage is the ratio between the amount of space used by the data
@@ -1754,7 +1802,8 @@
 # The two thresholds default to -1 to disable.
 # data_disk_usage_percentage_warn_threshold: -1
 # data_disk_usage_percentage_fail_threshold: -1
-# Allows defining the max disk size of the data directories when calculating thresholds for
+#
+# Guardrail that allows users to define the max disk size of the data directories when calculating thresholds for
 # disk_usage_percentage_warn_threshold and disk_usage_percentage_fail_threshold, so if this is greater than zero they
 # become percentages of a fixed size on disk instead of percentages of the physically available disk size. This should
 # be useful when we have a large disk and we only want to use a part of it for Cassandra's data directories.
@@ -1762,11 +1811,17 @@
 # Defaults to null to disable and use the physically available disk size of data directories during calculations.
 # Min unit: B
 # data_disk_usage_max_disk_size:
+#
 # Guardrail to warn or fail when the minimum replication factor is lesser than threshold.
 # This would also apply to system keyspaces.
 # Suggested value for use in production: 2 or higher
 # minimum_replication_factor_warn_threshold: -1
 # minimum_replication_factor_fail_threshold: -1
+#
+# Guardrail to warn or fail when the maximum replication factor is greater than threshold.
+# This would also apply to system keyspaces.
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
 
 # Startup Checks are executed as part of Cassandra startup process, not all of them
 # are configurable (so you can disable them) but these which are enumerated bellow.
diff --git a/debian/cassandra.postinst b/debian/cassandra.postinst
index 752ff1f..95882e3 100644
--- a/debian/cassandra.postinst
+++ b/debian/cassandra.postinst
@@ -37,6 +37,8 @@
         if [ -z "$2" ]; then
             chown -R cassandra: /var/lib/cassandra
             chown -R cassandra: /var/log/cassandra
+            chmod 750 /var/lib/cassandra/
+            chmod 750 /var/log/cassandra/
         fi
         if ! sysctl -p /etc/sysctl.d/cassandra.conf; then
             echo >&2
diff --git a/debian/changelog b/debian/changelog
index 5c193b9..b2397e5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,14 +1,8 @@
-cassandra (4.1~alpha2) UNRELEASED; urgency=medium
+cassandra (4.2) UNRELEASED; urgency=medium
 
   * New release
 
- -- Mick Semb Wever <mck@apache.org>  Fri, 20 May 2022 22:02:50 +0200
-
-cassandra (4.1~alpha1) unstable; urgency=medium
-
-  * New release
-
- -- Mick Semb Wever <mck@apache.org>  Fri, 20 May 2022 22:02:50 +0200
+ -- Mick Semb Wever <mck@apache.org>  Wed, 21 Apr 2021 19:24:28 +0200
 
 cassandra (4.0~rc1) unstable; urgency=medium
 
diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile
index 8bedf19..5fef1a9 100644
--- a/doc/cql3/CQL.textile
+++ b/doc/cql3/CQL.textile
@@ -1083,6 +1083,7 @@
 <selector> ::= <identifier>
              | <term>
              | WRITETIME '(' <identifier> ')'
+             | MAXWRITETIME '(' <identifier> ')'
              | COUNT '(' '*' ')'
              | TTL '(' <identifier> ')'
              | CAST '(' <selector> AS <type> ')'
@@ -1131,7 +1132,7 @@
 
 The @<select-clause>@ determines which columns needs to be queried and returned in the result-set. It consists of either the comma-separated list of <selector> or the wildcard character (@*@) to select all the columns defined for the table. Please note that for wildcard @SELECT@ queries the order of columns returned is not specified and is not guaranteed to be stable between Cassandra versions.
 
-A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The function allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ (resp. @TTL@) function allows to select the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
+A @<selector>@ is either a column name to retrieve or a @<function>@ of one or more @<term>@s. The functions allowed are the same as for @<term>@ and are described in the "function section":#functions. In addition to these generic functions, the @WRITETIME@ and @MAXWRITETIME@ (resp. @TTL@) functions allow selecting the timestamp of when the column was inserted (resp. the time to live (in seconds) for the column (or null if the column has no expiration set)) and the "@CAST@":#castFun function can be used to convert one data type to another.
 
 Additionally, individual values of maps and sets can be selected using @[ <term> ]@. For maps, this will return the value corresponding to the key, if such entry exists. For sets, this will return the key that is selected if it exists and is thus mainly a way to check element existence. It is also possible to select a slice of a set or map with @[ <term> ... <term> @], where both bound can be omitted.
 
@@ -2052,7 +2053,7 @@
 h2(#aggregates). Aggregates
 
 Aggregate functions work on a set of rows. They receive values for each row and returns one value for the whole set.
-If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
+If @normal@ columns, @scalar functions@, @UDT@ fields, @writetime@, @maxwritetime@ or @ttl@ are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.
 
 CQL3 distinguishes between built-in aggregates (so called 'native aggregates') and "user-defined aggregates":#udas. CQL3 includes several native aggregates, described below:
 
@@ -2433,6 +2434,7 @@
 | @WHERE@        | yes |
 | @WITH@         | yes |
 | @WRITETIME@    | no  |
+| @MAXWRITETIME@    | no  |
 
 h2(#appendixB). Appendix B: CQL Reserved Types
 
diff --git a/doc/modules/cassandra/pages/cql/appendices.adoc b/doc/modules/cassandra/pages/cql/appendices.adoc
index 7e17266..544afc0 100644
--- a/doc/modules/cassandra/pages/cql/appendices.adoc
+++ b/doc/modules/cassandra/pages/cql/appendices.adoc
@@ -139,6 +139,7 @@
 |`WHERE` |yes
 |`WITH` |yes
 |`WRITETIME` |no
+|`MAXWRITETIME` |no
 |===
 
 == Appendix B: CQL Reserved Types
diff --git a/doc/modules/cassandra/pages/cql/cql_singlefile.adoc b/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
index d99e12b..3e8c47f 100644
--- a/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
+++ b/doc/modules/cassandra/pages/cql/cql_singlefile.adoc
@@ -1645,6 +1645,7 @@
 ::=  +
 |  +
 | WRITETIME `(' `)' +
+| MAXWRITETIME `(' `)' +
 | COUNT `(' `*' `)' +
 | TTL `(' `)' +
 | CAST `(' AS `)' +
@@ -1706,8 +1707,8 @@
 A `<selector>` is either a column name to retrieve or a `<function>` of
 one or more `<term>`s. The function allowed are the same as for `<term>`
 and are described in the link:#functions[function section]. In addition
-to these generic functions, the `WRITETIME` (resp. `TTL`) function
-allows to select the timestamp of when the column was inserted (resp.
+to these generic functions, the `WRITETIME` and `MAXWRITETIME` (resp. `TTL`)
+functions allow selecting the timestamp of when the column was inserted (resp.
 the time to live (in seconds) for the column (or null if the column has
 no expiration set)) and the link:#castFun[`CAST`] function can be used
 to convert one data type to another. The `WRITETIME` and `TTL` functions
@@ -3150,8 +3151,8 @@
 
 Aggregate functions work on a set of rows. They receive values for each
 row and returns one value for the whole set. +
-If `normal` columns, `scalar functions`, `UDT` fields, `writetime` or
-`ttl` are selected together with aggregate functions, the values
+If `normal` columns, `scalar functions`, `UDT` fields, `writetime`, `maxwritetime`
+or `ttl` are selected together with aggregate functions, the values
 returned for them will be the ones of the first row matching the query.
 
 CQL3 distinguishes between built-in aggregates (so called `native
diff --git a/doc/modules/cassandra/pages/cql/dml.adoc b/doc/modules/cassandra/pages/cql/dml.adoc
index d0517aa..af9dbba 100644
--- a/doc/modules/cassandra/pages/cql/dml.adoc
+++ b/doc/modules/cassandra/pages/cql/dml.adoc
@@ -75,14 +75,17 @@
 ====
 
 [[writetime-and-ttl-function]]
-==== `WRITETIME` and `TTL` function
+==== `WRITETIME`, `MAXWRITETIME` and `TTL` function
 
-Selection supports two special functions that aren't allowed anywhere
-else: `WRITETIME` and `TTL`. 
-Both functions take only one argument, a column name.
+Selection supports three special functions that aren't allowed anywhere
+else: `WRITETIME`, `MAXWRITETIME` and `TTL`.
+All functions take only one argument, a column name.
 These functions retrieve meta-information that is stored internally for each column:
 
-* `WRITETIME` stores the timestamp of the value of the column
+* `WRITETIME` stores the timestamp of the value of the column. Note that this function cannot be applied to non-frozen collection
+and UDT.
+* `MAXWRITETIME` stores the largest timestamp of the value of the column. For non-collection and non-UDT columns, `MAXWRITETIME`
+is equivalent to `WRITETIME`. In the other cases, it returns the largest timestamp of the values in the column.
 * `TTL` stores the remaining time to live (in seconds) for the value of the column if it is set to expire; otherwise the value is `null`.
 
 The `WRITETIME` and `TTL` functions can't be used on multi-cell columns such as non-frozen
diff --git a/doc/modules/cassandra/pages/tools/cqlsh.adoc b/doc/modules/cassandra/pages/tools/cqlsh.adoc
index 8050ee5..0d40608 100644
--- a/doc/modules/cassandra/pages/tools/cqlsh.adoc
+++ b/doc/modules/cassandra/pages/tools/cqlsh.adoc
@@ -181,6 +181,21 @@
 Connected to Prod_Cluster at 192.0.0.1:9042.
 ----
 
+=== `SHOW REPLICAS`
+
+Prints the IP addresses of the Cassandra nodes which are replicas for the
+given token and keyspace. This command is available from Cassandra 4.2.
+
+`Usage`: `SHOW REPLICAS <token> (<keyspace>)`
+
+Example usage:
+
+[source,none]
+----
+cqlsh> SHOW REPLICAS 95
+['192.0.0.1', '192.0.0.2']
+----
+
 === `SHOW SESSION`
 
 Pretty prints a specific tracing session.
diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py
index 7de95cf..7e123bd 100644
--- a/pylib/cqlshlib/cql3handling.py
+++ b/pylib/cqlshlib/cql3handling.py
@@ -731,6 +731,7 @@
 <selector> ::= [colname]=<cident> ( "[" ( <term> ( ".." <term> "]" )? | <term> ".." ) )?
              | <udtSubfieldSelection>
              | "WRITETIME" "(" [colname]=<cident> ")"
+             | "MAXWRITETIME" "(" [colname]=<cident> ")"
              | "TTL" "(" [colname]=<cident> ")"
              | "COUNT" "(" star=( "*" | "1" ) ")"
              | "CAST" "(" <selector> "AS" <storageType> ")"
diff --git a/pylib/cqlshlib/cqlshhandling.py b/pylib/cqlshlib/cqlshhandling.py
index aa1fbc0..cc8590a 100644
--- a/pylib/cqlshlib/cqlshhandling.py
+++ b/pylib/cqlshlib/cqlshhandling.py
@@ -131,7 +131,7 @@
 '''
 
 cqlsh_show_cmd_syntax_rules = r'''
-<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> )
+<showCommand> ::= "SHOW" what=( "VERSION" | "HOST" | "SESSION" sessionid=<uuid> | "REPLICAS" token=<integer> (keyspace=<keyspaceName>)? )
                 ;
 '''
 
diff --git a/redhat/cassandra.spec b/redhat/cassandra.spec
index 7431c1c..3711dfd 100644
--- a/redhat/cassandra.spec
+++ b/redhat/cassandra.spec
@@ -161,9 +161,9 @@
 %{_sysconfdir}/security/limits.d/%{username}.conf
 /usr/share/%{username}*
 %config(noreplace) /%{_sysconfdir}/%{username}
-%attr(755,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
-%attr(755,%{username},%{username}) /var/log/%{username}*
-%attr(755,%{username},%{username}) /var/run/%{username}*
+%attr(750,%{username},%{username}) %config(noreplace) /var/lib/%{username}/*
+%attr(750,%{username},%{username}) /var/log/%{username}*
+%attr(750,%{username},%{username}) /var/run/%{username}*
 %{python_sitelib}/cqlshlib/
 %{python_sitelib}/cassandra_pylib*.egg-info
 
diff --git a/src/antlr/Lexer.g b/src/antlr/Lexer.g
index 34c7e2e..84dd036 100644
--- a/src/antlr/Lexer.g
+++ b/src/antlr/Lexer.g
@@ -178,6 +178,7 @@
 K_TIMEUUID:    T I M E U U I D;
 K_TOKEN:       T O K E N;
 K_WRITETIME:   W R I T E T I M E;
+K_MAXWRITETIME:M A X W R I T E T I M E;
 K_DATE:        D A T E;
 K_TIME:        T I M E;
 
diff --git a/src/antlr/Parser.g b/src/antlr/Parser.g
index d061ee4..2643e0a 100644
--- a/src/antlr/Parser.g
+++ b/src/antlr/Parser.g
@@ -415,8 +415,9 @@
 
 selectionFunction returns [Selectable.Raw s]
     : K_COUNT '(' '\*' ')'                      { $s = Selectable.WithFunction.Raw.newCountRowsFunction(); }
-    | K_WRITETIME '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, true); }
-    | K_TTL       '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, false); }
+    | K_MAXWRITETIME '(' c=sident ')'           { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME); }
+    | K_WRITETIME '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.WRITE_TIME); }
+    | K_TTL       '(' c=sident ')'              { $s = new Selectable.WritetimeOrTTL.Raw(c, Selectable.WritetimeOrTTL.Kind.TTL); }
     | K_CAST      '(' sn=unaliasedSelector K_AS t=native_type ')' {$s = new Selectable.WithCast.Raw(sn, t);}
     | f=functionName args=selectionFunctionArgs { $s = new Selectable.WithFunction.Raw(f, args); }
     ;
@@ -1870,7 +1871,7 @@
 
 unreserved_keyword returns [String str]
     : u=unreserved_function_keyword     { $str = u; }
-    | k=(K_TTL | K_COUNT | K_WRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
+    | k=(K_TTL | K_COUNT | K_WRITETIME | K_MAXWRITETIME | K_KEY | K_CAST | K_JSON | K_DISTINCT) { $str = $k.text; }
     ;
 
 unreserved_function_keyword returns [String str]
diff --git a/src/java/org/apache/cassandra/auth/AllowAllInternodeAuthenticator.java b/src/java/org/apache/cassandra/auth/AllowAllInternodeAuthenticator.java
index d0d2d74..ac62bfa 100644
--- a/src/java/org/apache/cassandra/auth/AllowAllInternodeAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/AllowAllInternodeAuthenticator.java
@@ -20,12 +20,14 @@
 package org.apache.cassandra.auth;
 
 import java.net.InetAddress;
+import java.security.cert.Certificate;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
 public class AllowAllInternodeAuthenticator implements IInternodeAuthenticator
 {
-    public boolean authenticate(InetAddress remoteAddress, int remotePort)
+    public boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                Certificate[] certificates, InternodeConnectionDirection connectionType)
     {
         return true;
     }
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index 0344de9..c227270 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -43,6 +43,7 @@
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.service.ClientState;
+import org.apache.cassandra.service.StorageProxy;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -386,6 +387,12 @@
     {
         // The delay is to give the node a chance to see its peers before attempting the operation
         ScheduledExecutors.optionalTasks.scheduleSelfRecurring(() -> {
+            if (!StorageProxy.isSafeToPerformRead())
+            {
+                logger.trace("Setup task may not run due to it not being safe to perform reads... rescheduling");
+                scheduleSetupTask(setupTask);
+                return;
+            }
             try
             {
                 setupTask.call();
diff --git a/src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java b/src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java
index 8e09b90..e5038c0 100644
--- a/src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java
+++ b/src/java/org/apache/cassandra/auth/IInternodeAuthenticator.java
@@ -20,6 +20,7 @@
 package org.apache.cassandra.auth;
 
 import java.net.InetAddress;
+import java.security.cert.Certificate;
 
 import org.apache.cassandra.exceptions.ConfigurationException;
 
@@ -33,7 +34,35 @@
      * @param remotePort port of the connecting node.
      * @return true if the connection should be accepted, false otherwise.
      */
-    boolean authenticate(InetAddress remoteAddress, int remotePort);
+    @Deprecated
+    default boolean authenticate(InetAddress remoteAddress, int remotePort)
+    {
+        return false;
+    }
+
+    /**
+     * Decides whether a peer is allowed to connect to this node.
+     * If this method returns false, the socket will be immediately closed.
+     * <p>
+     * Default implementation calls authenticate method by IP and port method
+     * <p>
+     * 1. If it is IP based authentication ignore the certificates & connectionType parameters in the implementation
+     * of this method.
+     * 2. For certificate based authentication like mTLS, server's identity for outbound connections is verified by the
+     * trusted root certificates in the outbound_keystore. In such cases this method may be overridden to return true
+     * when certificateType is OUTBOUND, as the authentication of the server happens during SSL Handshake.
+     *
+     * @param remoteAddress  ip address of the connecting node.
+     * @param remotePort     port of the connecting node.
+     * @param certificates   peer certificates
+     * @param connectionType If the connection is inbound/outbound connection.
+     * @return true if the connection should be accepted, false otherwise.
+     */
+    default boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                 Certificate[] certificates, InternodeConnectionDirection connectionType)
+    {
+        return authenticate(remoteAddress, remotePort);
+    }
 
     /**
      * Validates configuration of IInternodeAuthenticator implementation (if configurable).
@@ -41,4 +70,30 @@
      * @throws ConfigurationException when there is a configuration error.
      */
     void validateConfiguration() throws ConfigurationException;
+
+    /**
+     * Setup is called once upon system startup to initialize the IAuthenticator.
+     *
+     * For example, use this method to create any required keyspaces/column families.
+     */
+    default void setupInternode()
+    {
+
+    }
+
+    /**
+     * Enum that represents connection type of internode connection.
+     *
+     * INBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND - called after connection established, with certificate available if present.
+     * OUTBOUND_PRECONNECT - called before initiating a connection, without certificate available.
+     * The outbound connection will be authenticated with the certificate once a redirected connection is established.
+     * This is an extra check that can be used to detect misconfiguration before reconnection, or ignored by returning true.
+     */
+    enum InternodeConnectionDirection
+    {
+        INBOUND,
+        OUTBOUND,
+        OUTBOUND_PRECONNECT
+    }
 }
diff --git a/src/java/org/apache/cassandra/concurrent/DebuggableTask.java b/src/java/org/apache/cassandra/concurrent/DebuggableTask.java
new file mode 100644
index 0000000..ac04eb4
--- /dev/null
+++ b/src/java/org/apache/cassandra/concurrent/DebuggableTask.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.concurrent;
+
+import org.apache.cassandra.utils.Shared;
+
+import static org.apache.cassandra.utils.Shared.Recursive.INTERFACES;
+import static org.apache.cassandra.utils.Shared.Scope.SIMULATION;
+
+/**
+ * Interface to include on a Runnable or Callable submitted to the {@link SharedExecutorPool} to provide more
+ * detailed diagnostics.
+ */
+@Shared(scope = SIMULATION, inner = INTERFACES)
+public interface DebuggableTask
+{
+    public long creationTimeNanos();
+
+    public long startTimeNanos();
+
+    public String description();
+    
+    interface RunnableDebuggableTask extends Runnable, DebuggableTask {}
+
+    /**
+     * Wraps a {@link DebuggableTask} to include the name of the thread running it.
+     */
+    public static class RunningDebuggableTask implements DebuggableTask
+    {
+        private final DebuggableTask task;
+        private final String threadId;
+
+        public RunningDebuggableTask(String threadId, DebuggableTask task)
+        {
+            this.task = task;
+            this.threadId = threadId;
+        }
+
+        public String threadId()
+        {
+            return threadId;
+        }
+
+        public boolean hasTask()
+        {
+            return task != null;
+        }
+
+        @Override
+        public long creationTimeNanos()
+        {
+            assert hasTask();
+            return task.creationTimeNanos();
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            assert hasTask();
+            return task.startTimeNanos();
+        }
+
+        @Override
+        public String description()
+        {
+            assert hasTask();
+            return task.description();
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java b/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
index 7fa7dcb..27ab885 100644
--- a/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
+++ b/src/java/org/apache/cassandra/concurrent/ExecutionFailure.java
@@ -21,6 +21,7 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -106,6 +107,14 @@
     }
 
     /**
+     * @see #suppressing(WithResources, Runnable)
+     */
+    static RunnableDebuggableTask suppressingDebuggable(WithResources withResources, RunnableDebuggableTask debuggable)
+    {
+        return enforceOptionsDebuggable(withResources, debuggable, false);
+    }
+
+    /**
      * Encapsulate the execution, propagating or suppressing any exceptions as requested.
      *
      * note that if {@code wrap} is a {@link java.util.concurrent.Future} its exceptions may not be captured,
@@ -119,7 +128,7 @@
             @Override
             public void run()
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     wrap.run();
                 }
@@ -140,6 +149,54 @@
     }
 
     /**
+     * @see #enforceOptions(WithResources, Runnable, boolean)
+     */
+    private static RunnableDebuggableTask enforceOptionsDebuggable(WithResources withResources, RunnableDebuggableTask debuggable, boolean propagate)
+    {
+        return new RunnableDebuggableTask()
+        {
+            @Override
+            public void run()
+            {
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
+                {
+                    debuggable.run();
+                }
+                catch (Throwable t)
+                {
+                    handle(t);
+                    if (propagate)
+                        throw t;
+                }
+            }
+
+            @Override
+            public String toString()
+            {
+                return debuggable.toString();
+            }
+
+            @Override
+            public long creationTimeNanos()
+            {
+                return debuggable.creationTimeNanos();
+            }
+
+            @Override
+            public long startTimeNanos()
+            {
+                return debuggable.startTimeNanos();
+            }
+
+            @Override
+            public String description()
+            {
+                return debuggable.description();
+            }
+        };
+    }
+
+    /**
      * See {@link #enforceOptions(WithResources, Callable)}
      */
     static <V> Callable<V> propagating(Callable<V> wrap)
@@ -158,7 +215,7 @@
             @Override
             public V call() throws Exception
             {
-                try (Closeable close = withResources.get())
+                try (@SuppressWarnings("unused") Closeable close = withResources.get())
                 {
                     return wrap.call();
                 }
diff --git a/src/java/org/apache/cassandra/concurrent/FutureTask.java b/src/java/org/apache/cassandra/concurrent/FutureTask.java
index 2348ff6..763884a 100644
--- a/src/java/org/apache/cassandra/concurrent/FutureTask.java
+++ b/src/java/org/apache/cassandra/concurrent/FutureTask.java
@@ -20,9 +20,10 @@
 
 import java.util.concurrent.Callable;
 
-import org.apache.cassandra.utils.concurrent.RunnableFuture;
+import javax.annotation.Nullable;
 
 import org.apache.cassandra.utils.concurrent.AsyncFuture;
+import org.apache.cassandra.utils.concurrent.RunnableFuture;
 
 /**
  * A FutureTask that utilises Cassandra's {@link AsyncFuture}, making it compatible with {@link ExecutorPlus}.
@@ -31,15 +32,28 @@
 public class FutureTask<V> extends AsyncFuture<V> implements RunnableFuture<V>
 {
     private Callable<? extends V> call;
+    private volatile DebuggableTask debuggable;
 
     public FutureTask(Callable<? extends V> call)
     {
-        this.call = call;
+        this(call, call instanceof DebuggableTask ? (DebuggableTask) call : null);
     }
 
     public FutureTask(Runnable run)
     {
-        this.call = callable(run);
+        this(callable(run), run instanceof DebuggableTask ? (DebuggableTask) run : null);
+    }
+
+    private FutureTask(Callable<? extends V> call, DebuggableTask debuggable)
+    {
+        this.call = call;
+        this.debuggable = debuggable;
+    }
+
+    @Nullable
+    DebuggableTask debuggableTask()
+    {
+        return debuggable;
     }
 
     V call() throws Exception
@@ -63,6 +77,7 @@
         finally
         {
             call = null;
+            debuggable = null;
         }
     }
 
diff --git a/src/java/org/apache/cassandra/concurrent/SEPWorker.java b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
index c7b9abf..fe16c95 100644
--- a/src/java/org/apache/cassandra/concurrent/SEPWorker.java
+++ b/src/java/org/apache/cassandra/concurrent/SEPWorker.java
@@ -48,6 +48,8 @@
     long prevStopCheck = 0;
     long soleSpinnerSpinTime = 0;
 
+    private final AtomicReference<Runnable> currentTask = new AtomicReference<>();
+
     SEPWorker(ThreadGroup threadGroup, Long workerId, Work initialState, SharedExecutorPool pool)
     {
         this.pool = pool;
@@ -58,9 +60,27 @@
         thread.start();
     }
 
+    /**
+     * @return the current {@link DebuggableTask}, if one exists
+     */
+    public DebuggableTask currentDebuggableTask()
+    {
+        // can change after null check so go off local reference
+        Runnable task = currentTask.get();
+
+        // Local read and mutation Runnables are themselves debuggable
+        if (task instanceof DebuggableTask)
+            return (DebuggableTask) task;
+
+        if (task instanceof FutureTask)
+            return ((FutureTask<?>) task).debuggableTask();
+            
+        return null;
+    }
+
     public void run()
     {
-        /**
+        /*
          * we maintain two important invariants:
          * 1)   after exiting spinning phase, we ensure at least one more task on _each_ queue will be processed
          *      promptly after we begin, assuming any are outstanding on any pools. this is to permit producers to
@@ -101,8 +121,10 @@
                 if (assigned == null)
                     continue;
                 if (SET_THREAD_NAME)
-                    Thread.currentThread().setName(assigned.name + "-" + workerId);
+                    Thread.currentThread().setName(assigned.name + '-' + workerId);
+
                 task = assigned.tasks.poll();
+                currentTask.lazySet(task);
 
                 // if we do have tasks assigned, nobody will change our state so we can simply set it to WORKING
                 // (which is also a state that will never be interrupted externally)
@@ -128,9 +150,12 @@
                         break;
 
                     task = assigned.tasks.poll();
+                    currentTask.lazySet(task);
                 }
 
                 // return our work permit, and maybe signal shutdown
+                currentTask.lazySet(null);
+
                 if (status != RETURNED_WORK_PERMIT)
                     assigned.returnWorkPermit();
 
@@ -173,6 +198,11 @@
                 logger.error("Unexpected exception killed worker", t);
             }
         }
+        finally
+        {
+            currentTask.lazySet(null);
+            pool.workerEnded(this);
+        }
     }
 
     // try to assign this worker the provided work
@@ -420,4 +450,22 @@
             return assigned != null;
         }
     }
+
+    @Override
+    public String toString()
+    {
+        return thread.getName();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return workerId.intValue();
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        return obj == this;
+    }
 }
diff --git a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
index f74854f..0631ec6 100644
--- a/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
+++ b/src/java/org/apache/cassandra/concurrent/SharedExecutorPool.java
@@ -17,8 +17,11 @@
  */
 package org.apache.cassandra.concurrent;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.TimeUnit;
@@ -26,6 +29,9 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
+import java.util.stream.Collectors;
+
+import org.apache.cassandra.concurrent.DebuggableTask.RunningDebuggableTask;
 
 import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
 import static org.apache.cassandra.concurrent.SEPWorker.Work;
@@ -77,6 +83,8 @@
     final ConcurrentSkipListMap<Long, SEPWorker> spinning = new ConcurrentSkipListMap<>();
     // the collection of threads that have been asked to stop/deschedule - new workers are scheduled from here last
     final ConcurrentSkipListMap<Long, SEPWorker> descheduled = new ConcurrentSkipListMap<>();
+    // All SEPWorkers that are currently running
+    private final Set<SEPWorker> allWorkers = Collections.newSetFromMap(new ConcurrentHashMap<>());
 
     volatile boolean shuttingDown = false;
 
@@ -102,7 +110,23 @@
                 return;
 
         if (!work.isStop())
-            new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+        {
+            SEPWorker worker = new SEPWorker(threadGroup, workerId.incrementAndGet(), work, this);
+            allWorkers.add(worker);
+        }
+    }
+
+    void workerEnded(SEPWorker worker)
+    {
+        allWorkers.remove(worker);
+    }
+
+    public List<RunningDebuggableTask> runningTasks()
+    {
+        return allWorkers.stream()
+                         .map(worker -> new RunningDebuggableTask(worker.toString(), worker.currentDebuggableTask()))
+                         .filter(RunningDebuggableTask::hasTask)
+                         .collect(Collectors.toList());
     }
 
     void maybeStartSpinningWorker()
diff --git a/src/java/org/apache/cassandra/concurrent/TaskFactory.java b/src/java/org/apache/cassandra/concurrent/TaskFactory.java
index 56087d9..faeabe6 100644
--- a/src/java/org/apache/cassandra/concurrent/TaskFactory.java
+++ b/src/java/org/apache/cassandra/concurrent/TaskFactory.java
@@ -20,6 +20,7 @@
 
 import java.util.concurrent.Callable;
 
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.apache.cassandra.utils.Shared;
 import org.apache.cassandra.utils.WithResources;
 import org.apache.cassandra.utils.concurrent.RunnableFuture;
@@ -127,6 +128,9 @@
         @Override
         public Runnable toExecute(Runnable runnable)
         {
+            if (runnable instanceof RunnableDebuggableTask)
+                return ExecutionFailure.suppressingDebuggable(ExecutorLocals.propagate(), (RunnableDebuggableTask) runnable);
+
             // no reason to propagate exception when it is inaccessible to caller
             return ExecutionFailure.suppressing(ExecutorLocals.propagate(), runnable);
         }
diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java
index 578e480..834a3f6 100644
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@ -382,6 +382,9 @@
     // When true, new CDC mutations are rejected/blocked when reaching max CDC storage.
     // When false, new CDC mutations can always be added. But it will remove the oldest CDC commit log segment on full.
     public volatile boolean cdc_block_writes = true;
+    // When true, CDC data in SSTable go through commit logs during internodes streaming, e.g. repair
+    // When false, it behaves the same as normal streaming.
+    public volatile boolean cdc_on_repair_enabled = true;
     public String cdc_raw_directory;
     @Replaces(oldName = "cdc_total_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE_INT, deprecated = true)
     public DataStorageSpec.IntMebibytesBound cdc_total_space = new DataStorageSpec.IntMebibytesBound("0MiB");
@@ -829,13 +832,16 @@
     public volatile Set<ConsistencyLevel> write_consistency_levels_warned = Collections.emptySet();
     public volatile Set<ConsistencyLevel> write_consistency_levels_disallowed = Collections.emptySet();
     public volatile boolean user_timestamps_enabled = true;
+    public volatile boolean alter_table_enabled = true;
     public volatile boolean group_by_enabled = true;
     public volatile boolean drop_truncate_table_enabled = true;
+    public volatile boolean drop_keyspace_enabled = true;
     public volatile boolean secondary_indexes_enabled = true;
     public volatile boolean uncompressed_tables_enabled = true;
     public volatile boolean compact_tables_enabled = true;
     public volatile boolean read_before_write_list_operations_enabled = true;
     public volatile boolean allow_filtering_enabled = true;
+    public volatile boolean simplestrategy_enabled = true;
     public volatile DataStorageSpec.LongBytesBound collection_size_warn_threshold = null;
     public volatile DataStorageSpec.LongBytesBound collection_size_fail_threshold = null;
     public volatile int items_per_collection_warn_threshold = -1;
@@ -847,6 +853,8 @@
     public volatile DataStorageSpec.LongBytesBound data_disk_usage_max_disk_size = null;
     public volatile int minimum_replication_factor_warn_threshold = -1;
     public volatile int minimum_replication_factor_fail_threshold = -1;
+    public volatile int maximum_replication_factor_warn_threshold = -1;
+    public volatile int maximum_replication_factor_fail_threshold = -1;
 
     public volatile DurationSpec.LongNanosecondsBound streaming_state_expires = new DurationSpec.LongNanosecondsBound("3d");
     public volatile DataStorageSpec.LongBytesBound streaming_state_size = new DataStorageSpec.LongBytesBound("40MiB");
diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index 93fd1ff..e025ecc 100644
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@ -3520,6 +3520,16 @@
         conf.cdc_block_writes = val;
     }
 
+    public static boolean isCDCOnRepairEnabled()
+    {
+        return conf.cdc_on_repair_enabled;
+    }
+
+    public static void setCDCOnRepairEnabled(boolean val)
+    {
+        conf.cdc_on_repair_enabled = val;
+    }
+
     public static String getCDCLogLocation()
     {
         return conf.cdc_raw_directory;
@@ -4120,6 +4130,11 @@
             throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be less than minimum_replication_factor_fail_threshold (%d)", value, guardrails.getMinimumReplicationFactorFailThreshold()));
         }
 
+        if (guardrails.getMaximumReplicationFactorFailThreshold() != -1 && value > guardrails.getMaximumReplicationFactorFailThreshold())
+        {
+            throw new IllegalArgumentException(String.format("default_keyspace_rf to be set (%d) cannot be greater than maximum_replication_factor_fail_threshold (%d)", value, guardrails.getMaximumReplicationFactorFailThreshold()));
+        }
+
         conf.default_keyspace_rf = value;
     }
 
diff --git a/src/java/org/apache/cassandra/config/EncryptionOptions.java b/src/java/org/apache/cassandra/config/EncryptionOptions.java
index eb6724f..0ab653f 100644
--- a/src/java/org/apache/cassandra/config/EncryptionOptions.java
+++ b/src/java/org/apache/cassandra/config/EncryptionOptions.java
@@ -25,20 +25,14 @@
 import java.util.Objects;
 import java.util.Set;
 
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLException;
-import javax.net.ssl.TrustManagerFactory;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.security.AbstractSslContextFactory;
 import org.apache.cassandra.security.DisableSslContextFactory;
 import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.utils.FBUtilities;
@@ -111,6 +105,8 @@
     {
         KEYSTORE("keystore"),
         KEYSTORE_PASSWORD("keystore_password"),
+        OUTBOUND_KEYSTORE("outbound_keystore"),
+        OUTBOUND_KEYSTORE_PASSWORD("outbound_keystore_password"),
         TRUSTSTORE("truststore"),
         TRUSTSTORE_PASSWORD("truststore_password"),
         CIPHER_SUITES("cipher_suites"),
@@ -263,11 +259,8 @@
         }
     }
 
-    private void initializeSslContextFactory()
+    protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
     {
-        Map<String,Object> sslContextFactoryParameters = new HashMap<>();
-        prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
-
         /*
          * Copy all configs to the Map to pass it on to the ISslContextFactory's implementation
          */
@@ -284,6 +277,13 @@
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.REQUIRE_ENDPOINT_VERIFICATION, this.require_endpoint_verification);
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.ENABLED, this.enabled);
         putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OPTIONAL, this.optional);
+    }
+
+    private void initializeSslContextFactory()
+    {
+        Map<String, Object> sslContextFactoryParameters = new HashMap<>();
+        prepareSslContextFactoryParameterizedKeys(sslContextFactoryParameters);
+        fillSslContextParams(sslContextFactoryParameters);
 
         if (CassandraRelevantProperties.TEST_JVM_DTEST_DISABLE_SSL.getBoolean())
         {
@@ -296,8 +296,7 @@
         }
     }
 
-    private void putSslContextFactoryParameter(Map<String,Object> existingParameters, ConfigKey configKey,
-                                               Object value)
+    protected static void putSslContextFactoryParameter(Map<String, Object> existingParameters, ConfigKey configKey, Object value)
     {
         if (value != null) {
             existingParameters.put(configKey.getKeyName(), value);
@@ -608,15 +607,20 @@
         public final InternodeEncryption internode_encryption;
         @Replaces(oldName = "enable_legacy_ssl_storage_port", deprecated = true)
         public final boolean legacy_ssl_storage_port_enabled;
+        public final String outbound_keystore;
+        public final String outbound_keystore_password;
 
         public ServerEncryptionOptions()
         {
             this.internode_encryption = InternodeEncryption.none;
             this.legacy_ssl_storage_port_enabled = false;
+            this.outbound_keystore = null;
+            this.outbound_keystore_password = null;
         }
 
         public ServerEncryptionOptions(ParameterizedClass sslContextFactoryClass, String keystore,
-                                       String keystore_password, String truststore, String truststore_password,
+                                       String keystore_password,String outbound_keystore,
+                                       String outbound_keystore_password, String truststore, String truststore_password,
                                        List<String> cipher_suites, String protocol, List<String> accepted_protocols,
                                        String algorithm, String store_type, boolean require_client_auth,
                                        boolean require_endpoint_verification, Boolean optional,
@@ -627,6 +631,8 @@
             null, optional);
             this.internode_encryption = internode_encryption;
             this.legacy_ssl_storage_port_enabled = legacy_ssl_storage_port_enabled;
+            this.outbound_keystore = outbound_keystore;
+            this.outbound_keystore_password = outbound_keystore_password;
         }
 
         public ServerEncryptionOptions(ServerEncryptionOptions options)
@@ -634,6 +640,16 @@
             super(options);
             this.internode_encryption = options.internode_encryption;
             this.legacy_ssl_storage_port_enabled = options.legacy_ssl_storage_port_enabled;
+            this.outbound_keystore = options.outbound_keystore;
+            this.outbound_keystore_password = options.outbound_keystore_password;
+        }
+
+        @Override
+        protected void fillSslContextParams(Map<String, Object> sslContextFactoryParameters)
+        {
+            super.fillSslContextParams(sslContextFactoryParameters);
+            putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE, this.outbound_keystore);
+            putSslContextFactoryParameter(sslContextFactoryParameters, ConfigKey.OUTBOUND_KEYSTORE_PASSWORD, this.outbound_keystore_password);
         }
 
         @Override
@@ -697,7 +713,6 @@
          * values of "dc" and "all". This method returns the explicit, raw value of {@link #optional}
          * as set by the user (if set at all).
          */
-        @JsonIgnore
         public boolean isExplicitlyOptional()
         {
             return optional != null && optional;
@@ -705,7 +720,8 @@
 
         public ServerEncryptionOptions withSslContextFactory(ParameterizedClass sslContextFactoryClass)
         {
-            return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(sslContextFactoryClass, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -714,7 +730,8 @@
 
         public ServerEncryptionOptions withKeyStore(String keystore)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -723,7 +740,8 @@
 
         public ServerEncryptionOptions withKeyStorePassword(String keystore_password)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -732,7 +750,8 @@
 
         public ServerEncryptionOptions withTrustStore(String truststore)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -741,7 +760,8 @@
 
         public ServerEncryptionOptions withTrustStorePassword(String truststore_password)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -750,16 +770,18 @@
 
         public ServerEncryptionOptions withCipherSuites(List<String> cipher_suites)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
                                                legacy_ssl_storage_port_enabled).applyConfigInternal();
         }
 
-        public ServerEncryptionOptions withCipherSuites(String ... cipher_suites)
+        public ServerEncryptionOptions withCipherSuites(String... cipher_suites)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, Arrays.asList(cipher_suites), protocol,
                                                accepted_protocols, algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -768,7 +790,8 @@
 
         public ServerEncryptionOptions withProtocol(String protocol)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -777,7 +800,8 @@
 
         public ServerEncryptionOptions withAcceptedProtocols(List<String> accepted_protocols)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -786,7 +810,8 @@
 
         public ServerEncryptionOptions withAlgorithm(String algorithm)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -795,7 +820,8 @@
 
         public ServerEncryptionOptions withStoreType(String store_type)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -804,7 +830,8 @@
 
         public ServerEncryptionOptions withRequireClientAuth(boolean require_client_auth)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -813,7 +840,8 @@
 
         public ServerEncryptionOptions withRequireEndpointVerification(boolean require_endpoint_verification)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -822,7 +850,8 @@
 
         public ServerEncryptionOptions withOptional(boolean optional)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -831,7 +860,8 @@
 
         public ServerEncryptionOptions withInternodeEncryption(InternodeEncryption internode_encryption)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
@@ -840,12 +870,32 @@
 
         public ServerEncryptionOptions withLegacySslStoragePort(boolean enable_legacy_ssl_storage_port)
         {
-            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password, truststore,
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outbound_keystore_password, truststore,
                                                truststore_password, cipher_suites, protocol, accepted_protocols,
                                                algorithm, store_type, require_client_auth,
                                                require_endpoint_verification, optional, internode_encryption,
                                                enable_legacy_ssl_storage_port).applyConfigInternal();
         }
 
+        public ServerEncryptionOptions withOutboundKeystore(String outboundKeystore)
+        {
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outboundKeystore, outbound_keystore_password, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
+        }
+
+        public ServerEncryptionOptions withOutboundKeystorePassword(String outboundKeystorePassword)
+        {
+            return new ServerEncryptionOptions(ssl_context_factory, keystore, keystore_password,
+                                               outbound_keystore, outboundKeystorePassword, truststore,
+                                               truststore_password, cipher_suites, protocol, accepted_protocols,
+                                               algorithm, store_type, require_client_auth,
+                                               require_endpoint_verification, optional, internode_encryption,
+                                               legacy_ssl_storage_port_enabled).applyConfigInternal();
+        }
     }
 }
diff --git a/src/java/org/apache/cassandra/config/GuardrailsOptions.java b/src/java/org/apache/cassandra/config/GuardrailsOptions.java
index e4694b9..e84e0e2 100644
--- a/src/java/org/apache/cassandra/config/GuardrailsOptions.java
+++ b/src/java/org/apache/cassandra/config/GuardrailsOptions.java
@@ -81,7 +81,8 @@
         validateMaxIntThreshold(config.fields_per_udt_warn_threshold, config.fields_per_udt_fail_threshold, "fields_per_udt");
         validatePercentageThreshold(config.data_disk_usage_percentage_warn_threshold, config.data_disk_usage_percentage_fail_threshold, "data_disk_usage_percentage");
         validateDataDiskUsageMaxDiskSize(config.data_disk_usage_max_disk_size);
-        validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold, "minimum_replication_factor");
+        validateMinRFThreshold(config.minimum_replication_factor_warn_threshold, config.minimum_replication_factor_fail_threshold);
+        validateMaxRFThreshold(config.maximum_replication_factor_warn_threshold, config.maximum_replication_factor_fail_threshold);
     }
 
     @Override
@@ -344,6 +345,20 @@
     }
 
     @Override
+    public boolean getDropKeyspaceEnabled()
+    {
+        return config.drop_keyspace_enabled;
+    }
+
+    public void setDropKeyspaceEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("drop_keyspace_enabled",
+                                  enabled,
+                                  () -> config.drop_keyspace_enabled,
+                                  x -> config.drop_keyspace_enabled = x);
+    }
+
+    @Override
     public boolean getSecondaryIndexesEnabled()
     {
         return config.secondary_indexes_enabled;
@@ -386,6 +401,20 @@
     }
 
     @Override
+    public boolean getAlterTableEnabled()
+    {
+        return config.alter_table_enabled;
+    }
+
+    public void setAlterTableEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("alter_table_enabled",
+                                  enabled,
+                                  () -> config.alter_table_enabled,
+                                  x -> config.alter_table_enabled = x);
+    }
+
+    @Override
     public boolean getReadBeforeWriteListOperationsEnabled()
     {
         return config.read_before_write_list_operations_enabled;
@@ -414,6 +443,20 @@
     }
 
     @Override
+    public boolean getSimpleStrategyEnabled()
+    {
+        return config.simplestrategy_enabled;
+    }
+
+    public void setSimpleStrategyEnabled(boolean enabled)
+    {
+        updatePropertyWithLogging("simplestrategy_enabled",
+                                  enabled,
+                                  () -> config.simplestrategy_enabled,
+                                  x -> config.simplestrategy_enabled = x);
+    }
+
+    @Override
     public int getInSelectCartesianProductWarnThreshold()
     {
         return config.in_select_cartesian_product_warn_threshold;
@@ -623,7 +666,7 @@
 
     public void setMinimumReplicationFactorThreshold(int warn, int fail)
     {
-        validateMinRFThreshold(warn, fail, "minimum_replication_factor");
+        validateMinRFThreshold(warn, fail);
         updatePropertyWithLogging("minimum_replication_factor_warn_threshold",
                                   warn,
                                   () -> config.minimum_replication_factor_warn_threshold,
@@ -634,6 +677,31 @@
                                   x -> config.minimum_replication_factor_fail_threshold = x);
     }
 
+    @Override
+    public int getMaximumReplicationFactorWarnThreshold()
+    {
+        return config.maximum_replication_factor_warn_threshold;
+    }
+
+    @Override
+    public int getMaximumReplicationFactorFailThreshold()
+    {
+        return config.maximum_replication_factor_fail_threshold;
+    }
+
+    public void setMaximumReplicationFactorThreshold(int warn, int fail)
+    {
+        validateMaxRFThreshold(warn, fail);
+        updatePropertyWithLogging("maximum_replication_factor_warn_threshold",
+                                  warn,
+                                  () -> config.maximum_replication_factor_warn_threshold,
+                                  x -> config.maximum_replication_factor_warn_threshold = x);
+        updatePropertyWithLogging("maximum_replication_factor_fail_threshold",
+                                  fail,
+                                  () -> config.maximum_replication_factor_fail_threshold,
+                                  x -> config.maximum_replication_factor_fail_threshold = x);
+    }
+
     private static <T> void updatePropertyWithLogging(String propertyName, T newValue, Supplier<T> getter, Consumer<T> setter)
     {
         T oldValue = getter.get();
@@ -689,10 +757,24 @@
         validateWarnGreaterThanFail(warn, fail, name);
     }
 
-    private static void validateMinRFThreshold(int warn, int fail, String name)
+    private static void validateMinRFThreshold(int warn, int fail)
     {
-        validateMinIntThreshold(warn, fail, name);
-        validateMinRFVersusDefaultRF(fail, name);
+        validateMinIntThreshold(warn, fail, "minimum_replication_factor");
+
+        if (fail > DatabaseDescriptor.getDefaultKeyspaceRF())
+            throw new IllegalArgumentException(format("minimum_replication_factor_fail_threshold to be set (%d) " +
+                                                      "cannot be greater than default_keyspace_rf (%d)",
+                                                      fail, DatabaseDescriptor.getDefaultKeyspaceRF()));
+    }
+
+    private static void validateMaxRFThreshold(int warn, int fail)
+    {
+        validateMaxIntThreshold(warn, fail, "maximum_replication_factor");
+
+        if (fail != -1 && fail < DatabaseDescriptor.getDefaultKeyspaceRF())
+            throw new IllegalArgumentException(format("maximum_replication_factor_fail_threshold to be set (%d) " +
+                                                      "cannot be lesser than default_keyspace_rf (%d)",
+                                                      fail, DatabaseDescriptor.getDefaultKeyspaceRF()));
     }
 
     private static void validateWarnLowerThanFail(long warn, long fail, String name)
@@ -715,15 +797,6 @@
                                                       "than the fail threshold %d", warn, name, fail));
     }
 
-    private static void validateMinRFVersusDefaultRF(int fail, String name) throws IllegalArgumentException
-    {
-        if (fail > DatabaseDescriptor.getDefaultKeyspaceRF())
-        {
-            throw new IllegalArgumentException(String.format("%s_fail_threshold to be set (%d) cannot be greater than default_keyspace_rf (%d)",
-                                                           name, fail, DatabaseDescriptor.getDefaultKeyspaceRF()));
-        }
-    }
-
     private static void validateSize(DataStorageSpec.LongBytesBound size, boolean allowZero, String name)
     {
         if (size == null)
diff --git a/src/java/org/apache/cassandra/cql3/CQL3Type.java b/src/java/org/apache/cassandra/cql3/CQL3Type.java
index 1d792b2..1c20e6b 100644
--- a/src/java/org/apache/cassandra/cql3/CQL3Type.java
+++ b/src/java/org/apache/cassandra/cql3/CQL3Type.java
@@ -201,7 +201,7 @@
 
             StringBuilder target = new StringBuilder();
             buffer = buffer.duplicate();
-            int size = CollectionSerializer.readCollectionSize(buffer, version);
+            int size = CollectionSerializer.readCollectionSize(buffer, ByteBufferAccessor.instance, version);
             buffer.position(buffer.position() + CollectionSerializer.sizeOfCollectionSize(size, version));
 
             switch (type.kind)
diff --git a/src/java/org/apache/cassandra/cql3/Tuples.java b/src/java/org/apache/cassandra/cql3/Tuples.java
index b8acd59..6e028c2 100644
--- a/src/java/org/apache/cassandra/cql3/Tuples.java
+++ b/src/java/org/apache/cassandra/cql3/Tuples.java
@@ -154,14 +154,14 @@
 
         public static Value fromSerialized(ByteBuffer bytes, TupleType type)
         {
-            ByteBuffer[] values = type.split(bytes);
+            ByteBuffer[] values = type.split(ByteBufferAccessor.instance, bytes);
             if (values.length > type.size())
             {
                 throw new InvalidRequestException(String.format(
                         "Tuple value contained too many fields (expected %s, got %s)", type.size(), values.length));
             }
 
-            return new Value(type.split(bytes));
+            return new Value(type.split(ByteBufferAccessor.instance, bytes));
         }
 
         public ByteBuffer get(ProtocolVersion protocolVersion)
@@ -272,7 +272,8 @@
                 // type.split(bytes)
                 List<List<ByteBuffer>> elements = new ArrayList<>(l.size());
                 for (Object element : l)
-                    elements.add(Arrays.asList(tupleType.split(type.getElementsType().decompose(element))));
+                    elements.add(Arrays.asList(tupleType.split(ByteBufferAccessor.instance,
+                                                               type.getElementsType().decompose(element))));
                 return new InValue(elements);
             }
             catch (MarshalException e)
diff --git a/src/java/org/apache/cassandra/cql3/UserTypes.java b/src/java/org/apache/cassandra/cql3/UserTypes.java
index b023a8a..a63420f 100644
--- a/src/java/org/apache/cassandra/cql3/UserTypes.java
+++ b/src/java/org/apache/cassandra/cql3/UserTypes.java
@@ -217,7 +217,7 @@
         public static Value fromSerialized(ByteBuffer bytes, UserType type)
         {
             type.validate(bytes);
-            return new Value(type, type.split(bytes));
+            return new Value(type, type.split(ByteBufferAccessor.instance, bytes));
         }
 
         public ByteBuffer get(ProtocolVersion protocolVersion)
diff --git a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
index e3f463a..68cf2d3 100644
--- a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
+++ b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java
@@ -650,8 +650,8 @@
 
             Cell<?> cell = getCell(row, column);
             return cell == null
-                      ? null
-                      : userType.split(cell.buffer())[userType.fieldPosition(field)];
+                   ? null
+                   : userType.split(ByteBufferAccessor.instance, cell.buffer())[userType.fieldPosition(field)];
         }
 
         private boolean isSatisfiedBy(ByteBuffer rowValue)
diff --git a/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java b/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
index 0c62397..ddcc868 100644
--- a/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/FieldSelector.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.UserType;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -108,7 +109,7 @@
         ByteBuffer value = selected.getOutput(protocolVersion);
         if (value == null)
             return null;
-        ByteBuffer[] buffers = type.split(value);
+        ByteBuffer[] buffers = type.split(ByteBufferAccessor.instance, value);
         return field < buffers.length ? buffers[field] : null;
     }
 
diff --git a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
index 22566b2..3e652df 100644
--- a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
+++ b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java
@@ -19,7 +19,10 @@
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
+import java.util.function.BiFunction;
+import java.util.function.Function;
 
 import org.apache.cassandra.cql3.ResultSet;
 import org.apache.cassandra.cql3.ResultSet.ResultMetadata;
@@ -28,6 +31,7 @@
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.aggregation.GroupMaker;
 import org.apache.cassandra.db.rows.Cell;
+import org.apache.cassandra.db.rows.ComplexColumnData;
 
 public final class ResultSetBuilder
 {
@@ -98,6 +102,27 @@
         inputRow.add(v);
     }
 
+    public void add(ComplexColumnData complexColumnData, Function<Iterator<Cell<?>>, ByteBuffer> serializer)
+    {
+        if (complexColumnData == null)
+        {
+            inputRow.add(null);
+            return;
+        }
+
+        long timestamp = -1L;
+        if (selectors.collectMaxTimestamps())
+        {
+            Iterator<Cell<?>> cells = complexColumnData.iterator();
+            while (cells.hasNext())
+            {
+                timestamp = Math.max(timestamp, cells.next().timestamp());
+            }
+        }
+
+        inputRow.add(serializer.apply(complexColumnData.iterator()), timestamp, -1);
+    }
+
     public void add(Cell<?> c, int nowInSec)
     {
         inputRow.add(c, nowInSec);
diff --git a/src/java/org/apache/cassandra/cql3/selection/Selectable.java b/src/java/org/apache/cassandra/cql3/selection/Selectable.java
index 102e2f9..4210f9c 100644
--- a/src/java/org/apache/cassandra/cql3/selection/Selectable.java
+++ b/src/java/org/apache/cassandra/cql3/selection/Selectable.java
@@ -222,19 +222,46 @@
 
     public static class WritetimeOrTTL implements Selectable
     {
-        public final ColumnMetadata column;
-        public final boolean isWritetime;
+        // The order of the variants in the Kind enum matters as they are used in ser/deser
+        public enum Kind
+        {
+            TTL("ttl", Int32Type.instance),
+            WRITE_TIME("writetime", LongType.instance),
+            MAX_WRITE_TIME("maxwritetime", LongType.instance); // maxwritetime is available after Cassandra 4.1 (exclusive)
 
-        public WritetimeOrTTL(ColumnMetadata column, boolean isWritetime)
+            public final String name;
+            public final AbstractType<?> returnType;
+
+            public static Kind fromOrdinal(int ordinal)
+            {
+                return values()[ordinal];
+            }
+
+            Kind(String name, AbstractType<?> returnType)
+            {
+                this.name = name;
+                this.returnType = returnType;
+            }
+
+            public boolean allowedForMultiCell()
+            {
+                return this == MAX_WRITE_TIME;
+            }
+        }
+
+        public final ColumnMetadata column;
+        public final Kind kind;
+
+        public WritetimeOrTTL(ColumnMetadata column, Kind kind)
         {
             this.column = column;
-            this.isWritetime = isWritetime;
+            this.kind = kind;
         }
 
         @Override
         public String toString()
         {
-            return (isWritetime ? "writetime" : "ttl") + "(" + column.name + ")";
+            return kind.name + "(" + column.name + ")";
         }
 
         public Selector.Factory newSelectorFactory(TableMetadata table,
@@ -245,20 +272,22 @@
             if (column.isPrimaryKeyColumn())
                 throw new InvalidRequestException(
                         String.format("Cannot use selection function %s on PRIMARY KEY part %s",
-                                      isWritetime ? "writeTime" : "ttl",
+                                      kind.name,
                                       column.name));
-            if (column.type.isMultiCell())
+
+            // only maxwritetime is allowed for multicell types
+            if (column.type.isMultiCell() && !kind.allowedForMultiCell())
                 throw new InvalidRequestException(String.format("Cannot use selection function %s on non-frozen %s %s",
-                                                                isWritetime ? "writeTime" : "ttl",
+                                                                kind.name,
                                                                 column.type.isCollection() ? "collection" : "UDT",
                                                                 column.name));
 
-            return WritetimeOrTTLSelector.newFactory(column, addAndGetIndex(column, defs), isWritetime);
+            return WritetimeOrTTLSelector.newFactory(column, addAndGetIndex(column, defs), kind);
         }
 
         public AbstractType<?> getExactTypeIfKnown(String keyspace)
         {
-            return isWritetime ? LongType.instance : Int32Type.instance;
+            return kind.returnType;
         }
 
         @Override
@@ -270,18 +299,18 @@
         public static class Raw implements Selectable.Raw
         {
             private final Selectable.RawIdentifier id;
-            private final boolean isWritetime;
+            private final Kind kind;
 
-            public Raw(Selectable.RawIdentifier id, boolean isWritetime)
+            public Raw(Selectable.RawIdentifier id, Kind kind)
             {
                 this.id = id;
-                this.isWritetime = isWritetime;
+                this.kind = kind;
             }
 
             @Override
             public WritetimeOrTTL prepare(TableMetadata table)
             {
-                return new WritetimeOrTTL(id.prepare(table), isWritetime);
+                return new WritetimeOrTTL(id.prepare(table), kind);
             }
         }
     }
diff --git a/src/java/org/apache/cassandra/cql3/selection/Selection.java b/src/java/org/apache/cassandra/cql3/selection/Selection.java
index f07184a..2f41192 100644
--- a/src/java/org/apache/cassandra/cql3/selection/Selection.java
+++ b/src/java/org/apache/cassandra/cql3/selection/Selection.java
@@ -377,6 +377,12 @@
         public boolean collectTimestamps();
 
         /**
+         * Checks if one of the selectors collects maxTimestamps.
+         * @return {@code true} if one of the selectors collect maxTimestamps, {@code false} otherwise.
+         */
+        public boolean collectMaxTimestamps();
+
+        /**
          * Adds the current row of the specified <code>ResultSetBuilder</code>.
          *
          * @param input the input row
@@ -507,6 +513,11 @@
                 }
 
                 @Override
+                public boolean collectMaxTimestamps() {
+                    return false;
+                }
+
+                @Override
                 public ColumnFilter getColumnFilter()
                 {
                     // In the case of simple selection we know that the ColumnFilter has already been computed and
@@ -521,6 +532,7 @@
     {
         private final SelectorFactories factories;
         private final boolean collectTimestamps;
+        private final boolean collectMaxTimestamps;
         private final boolean collectTTLs;
 
         public SelectionWithProcessing(TableMetadata table,
@@ -541,7 +553,8 @@
 
             this.factories = factories;
             this.collectTimestamps = factories.containsWritetimeSelectorFactory();
-            this.collectTTLs = factories.containsTTLSelectorFactory();;
+            this.collectMaxTimestamps = factories.containsMaxWritetimeSelectorFactory();
+            this.collectTTLs = factories.containsTTLSelectorFactory();
 
             for (ColumnMetadata orderingColumn : orderingColumns)
             {
@@ -619,7 +632,12 @@
                 @Override
                 public boolean collectTimestamps()
                 {
-                    return collectTimestamps;
+                    return collectTimestamps || collectMaxTimestamps;
+                }
+
+                @Override
+                public boolean collectMaxTimestamps() {
+                    return collectMaxTimestamps;
                 }
 
                 @Override
diff --git a/src/java/org/apache/cassandra/cql3/selection/Selector.java b/src/java/org/apache/cassandra/cql3/selection/Selector.java
index 463382d..8226c2d 100644
--- a/src/java/org/apache/cassandra/cql3/selection/Selector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/Selector.java
@@ -20,8 +20,10 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
+import org.apache.cassandra.db.rows.ComplexColumnData;
 import org.apache.cassandra.schema.CQLTypeParser;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.Schema;
@@ -71,7 +73,7 @@
     /**
      * The <code>Selector</code> kinds.
      */
-    public static enum Kind
+    public enum Kind
     {
         SIMPLE_SELECTOR(SimpleSelector.deserializer),
         TERM_SELECTOR(TermSelector.deserializer),
@@ -152,6 +154,17 @@
         }
 
         /**
+         * Checks if this factory creates <code>maxwritetime</code> selector instances.
+         *
+         * @return <code>true</code> if this factory creates <code>maxwritetime</code> selector instances,
+         * <code>false</code> otherwise
+         */
+        public boolean isMaxWritetimeSelectorFactory()
+        {
+            return false;
+        }
+
+        /**
          * Checks if this factory creates <code>TTL</code> selectors instances.
          *
          * @return <code>true</code> if this factory creates <code>TTL</code> selectors instances,
@@ -322,13 +335,18 @@
 
         public void add(ByteBuffer v)
         {
+            add(v, Long.MIN_VALUE, -1);
+        }
+
+        public void add(ByteBuffer v, long timestamp, int ttl)
+        {
             values[index] = v;
 
             if (timestamps != null)
-                timestamps[index] = Long.MIN_VALUE;
+                timestamps[index] = timestamp;
 
             if (ttls != null)
-                ttls[index] = -1;
+                ttls[index] = ttl;
 
             index++;
         }
diff --git a/src/java/org/apache/cassandra/cql3/selection/SelectorFactories.java b/src/java/org/apache/cassandra/cql3/selection/SelectorFactories.java
index 7f4bcb3..1b275a8 100644
--- a/src/java/org/apache/cassandra/cql3/selection/SelectorFactories.java
+++ b/src/java/org/apache/cassandra/cql3/selection/SelectorFactories.java
@@ -32,22 +32,27 @@
 import org.apache.cassandra.exceptions.InvalidRequestException;
 
 /**
- * A set of <code>Selector</code> factories.
+ * A set of {@code Selector} factories.
  */
 final class SelectorFactories implements Iterable<Selector.Factory>
 {
     /**
-     * The <code>Selector</code> factories.
+     * The {@code Selector} factories.
      */
     private final List<Selector.Factory> factories;
 
     /**
-     * <code>true</code> if one of the factory creates writetime selectors.
+     * {@code true} if one of the factories creates writetime selectors.
      */
     private boolean containsWritetimeFactory;
 
     /**
-     * <code>true</code> if one of the factory creates TTL selectors.
+     * {@code true} if one of the factories creates maxWritetime selectors.
+     */
+    private boolean containsMaxWritetimeFactory;
+
+    /**
+     * {@code true} if one of the factories creates TTL selectors.
      */
     private boolean containsTTLFactory;
 
@@ -96,6 +101,7 @@
             Factory factory = selectable.newSelectorFactory(table, expectedType, defs, boundNames);
             containsWritetimeFactory |= factory.isWritetimeSelectorFactory();
             containsTTLFactory |= factory.isTTLSelectorFactory();
+            containsMaxWritetimeFactory |= factory.isMaxWritetimeSelectorFactory();
             if (factory.isAggregateSelectorFactory())
                 ++numberOfAggregateFactories;
             factories.add(factory);
@@ -166,6 +172,17 @@
     }
 
     /**
+     * Checks if this {@code SelectorFactories} contains at least one factory for maxWritetime selectors.
+     *
+     * @return {@code true} if this {@code SelectorFactories} contains at least one factory for maxWritetime
+     * selectors, {@code false} otherwise.
+     */
+    public boolean containsMaxWritetimeSelectorFactory()
+    {
+        return containsMaxWritetimeFactory;
+    }
+
+    /**
      * Checks if this <code>SelectorFactories</code> contains at least one factory for TTL selectors.
      *
      * @return <code>true</code> if this <code>SelectorFactories</code> contains at least one factory for TTL
diff --git a/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java b/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
index 2c56f5c..29ebfbb 100644
--- a/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
+++ b/src/java/org/apache/cassandra/cql3/selection/WritetimeOrTTLSelector.java
@@ -29,8 +29,6 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.Int32Type;
-import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.transport.ProtocolVersion;
@@ -45,29 +43,30 @@
             ByteBuffer columnName = ByteBufferUtil.readWithVIntLength(in);
             ColumnMetadata column = metadata.getColumn(columnName);
             int idx = in.readInt();
-            boolean isWritetime = in.readBoolean();
-            return new WritetimeOrTTLSelector(column, idx, isWritetime);
+            int ordinal = in.readByte();
+            Selectable.WritetimeOrTTL.Kind k = Selectable.WritetimeOrTTL.Kind.fromOrdinal(ordinal);
+            return new WritetimeOrTTLSelector(column, idx, k);
         }
     };
 
     private final ColumnMetadata column;
     private final int idx;
-    private final boolean isWritetime;
+    private final Selectable.WritetimeOrTTL.Kind kind;
     private ByteBuffer current;
     private boolean isSet;
 
-    public static Factory newFactory(final ColumnMetadata def, final int idx, final boolean isWritetime)
+    public static Factory newFactory(final ColumnMetadata def, final int idx, final Selectable.WritetimeOrTTL.Kind kind)
     {
         return new Factory()
         {
             protected String getColumnName()
             {
-                return String.format("%s(%s)", isWritetime ? "writetime" : "ttl", def.name.toString());
+                return String.format("%s(%s)", kind.name, def.name.toString());
             }
 
             protected AbstractType<?> getReturnType()
             {
-                return isWritetime ? LongType.instance : Int32Type.instance;
+                return kind.returnType;
             }
 
             protected void addColumnMapping(SelectionColumnMapping mapping, ColumnSpecification resultsColumn)
@@ -77,17 +76,25 @@
 
             public Selector newInstance(QueryOptions options)
             {
-                return new WritetimeOrTTLSelector(def, idx, isWritetime);
+                return new WritetimeOrTTLSelector(def, idx, kind);
             }
 
+            @Override
             public boolean isWritetimeSelectorFactory()
             {
-                return isWritetime;
+                return kind != Selectable.WritetimeOrTTL.Kind.TTL;
             }
 
+            @Override
             public boolean isTTLSelectorFactory()
             {
-                return !isWritetime;
+                return kind == Selectable.WritetimeOrTTL.Kind.TTL;
+            }
+
+            @Override
+            public boolean isMaxWritetimeSelectorFactory()
+            {
+                return kind == Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME;
             }
 
             public boolean areAllFetchedColumnsKnown()
@@ -114,16 +121,16 @@
 
         isSet = true;
 
-        if (isWritetime)
-        {
-            long ts = input.getTimestamp(idx);
-            current = ts != Long.MIN_VALUE ? ByteBufferUtil.bytes(ts) : null;
-        }
-        else
+        if (kind == Selectable.WritetimeOrTTL.Kind.TTL)
         {
             int ttl = input.getTtl(idx);
             current = ttl > 0 ? ByteBufferUtil.bytes(ttl) : null;
         }
+        else
+        {
+            long ts = input.getTimestamp(idx);
+            current = ts != Long.MIN_VALUE ? ByteBufferUtil.bytes(ts) : null;
+        }
     }
 
     public ByteBuffer getOutput(ProtocolVersion protocolVersion)
@@ -139,7 +146,7 @@
 
     public AbstractType<?> getType()
     {
-        return isWritetime ? LongType.instance : Int32Type.instance;
+        return kind.returnType;
     }
 
     @Override
@@ -148,12 +155,12 @@
         return column.name.toString();
     }
 
-    private WritetimeOrTTLSelector(ColumnMetadata column, int idx, boolean isWritetime)
+    private WritetimeOrTTLSelector(ColumnMetadata column, int idx, Selectable.WritetimeOrTTL.Kind kind)
     {
         super(Kind.WRITETIME_OR_TTL_SELECTOR);
         this.column = column;
         this.idx = idx;
-        this.isWritetime = isWritetime;
+        this.kind = kind;
     }
 
     @Override
@@ -169,13 +176,13 @@
 
         return Objects.equal(column, s.column)
             && Objects.equal(idx, s.idx)
-            && Objects.equal(isWritetime, s.isWritetime);
+            && kind == s.kind;
     }
 
     @Override
     public int hashCode()
     {
-        return Objects.hashCode(column, idx, isWritetime);
+        return Objects.hashCode(column, idx, kind);
     }
 
     @Override
@@ -183,7 +190,7 @@
     {
         return ByteBufferUtil.serializedSizeWithVIntLength(column.name.bytes)
                 + TypeSizes.sizeof(idx)
-                + TypeSizes.sizeof(isWritetime);
+                + TypeSizes.sizeofUnsignedVInt(kind.ordinal());
     }
 
     @Override
@@ -191,6 +198,6 @@
     {
         ByteBufferUtil.writeWithVIntLength(column.name.bytes, out);
         out.writeInt(idx);
-        out.writeBoolean(isWritetime);
+        out.writeByte(kind.ordinal());
     }
 }
diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
index 030b4cd..0d43313 100644
--- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java
@@ -1039,12 +1039,16 @@
         {
             assert def.type.isMultiCell();
             ComplexColumnData complexData = row.getComplexColumnData(def);
-            if (complexData == null)
-                result.add(null);
-            else if (def.type.isCollection())
-                result.add(((CollectionType) def.type).serializeForNativeProtocol(complexData.iterator(), protocolVersion));
-            else
-                result.add(((UserType) def.type).serializeForNativeProtocol(complexData.iterator(), protocolVersion));
+            result.add(complexData, iterator -> {
+                if (def.type.isCollection())
+                {
+                    return ((CollectionType) def.type).serializeForNativeProtocol(iterator, protocolVersion);
+                }
+                else
+                {
+                    return ((UserType) def.type).serializeForNativeProtocol(iterator, protocolVersion);
+                }
+            });
         }
         else
         {
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
index 87377d7..dec0655 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterKeyspaceStatement.java
@@ -31,12 +31,14 @@
 import org.apache.cassandra.cql3.CQLStatement;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.locator.InetAddressAndPort;
 import org.apache.cassandra.locator.LocalStrategy;
 import org.apache.cassandra.locator.ReplicationFactor;
+import org.apache.cassandra.locator.SimpleStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceMetadata.KeyspaceDiff;
 import org.apache.cassandra.schema.Keyspaces;
@@ -76,6 +78,9 @@
 
         KeyspaceMetadata newKeyspace = keyspace.withSwapped(attrs.asAlteredKeyspaceParams(keyspace.params));
 
+        if (attrs.getReplicationStrategyClass() != null && attrs.getReplicationStrategyClass().equals(SimpleStrategy.class.getSimpleName()))
+            Guardrails.simpleStrategyEnabled.ensureEnabled(state);
+
         if (newKeyspace.params.replication.klass.equals(LocalStrategy.class))
             throw ire("Unable to use given strategy class: LocalStrategy is reserved for internal use.");
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
index 2736252..94f72d4 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/AlterTableStatement.java
@@ -79,6 +79,7 @@
 {
     protected final String tableName;
     private final boolean ifExists;
+    protected ClientState state;
 
     public AlterTableStatement(String keyspaceName, String tableName, boolean ifExists)
     {
@@ -87,6 +88,15 @@
         this.ifExists = ifExists;
     }
 
+    @Override
+    public void validate(ClientState state)
+    {
+        super.validate(state);
+
+        // save the query state to use it for guardrails validation in #apply
+        this.state = state;
+    }
+
     public Keyspaces apply(Keyspaces schema)
     {
         KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
@@ -187,6 +197,7 @@
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
+            Guardrails.alterTableEnabled.ensureEnabled("ALTER TABLE changing columns", state);
             TableMetadata.Builder tableBuilder = table.unbuild();
             Views.Builder viewsBuilder = keyspace.views.unbuild();
             newColumns.forEach(c -> addColumn(keyspace, table, c, ifColumnNotExists, tableBuilder, viewsBuilder));
@@ -289,6 +300,7 @@
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
+            Guardrails.alterTableEnabled.ensureEnabled("ALTER TABLE changing columns", state);
             TableMetadata.Builder builder = table.unbuild();
             removedColumns.forEach(c -> dropColumn(keyspace, table, c, ifColumnExists, builder));
             return keyspace.withSwapped(keyspace.tables.withSwapped(builder.build()));
@@ -356,6 +368,7 @@
 
         public KeyspaceMetadata apply(KeyspaceMetadata keyspace, TableMetadata table)
         {
+            Guardrails.alterTableEnabled.ensureEnabled("ALTER TABLE changing columns", state);
             TableMetadata.Builder tableBuilder = table.unbuild();
             Views.Builder viewsBuilder = keyspace.views.unbuild();
             renamedColumns.forEach((o, n) -> renameColumn(keyspace, table, o, n, ifColumnsExists, tableBuilder, viewsBuilder));
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
index dc82f93..ad6bcc4 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateKeyspaceStatement.java
@@ -36,6 +36,7 @@
 import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.exceptions.AlreadyExistsException;
 import org.apache.cassandra.locator.LocalStrategy;
+import org.apache.cassandra.locator.SimpleStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams.Option;
 import org.apache.cassandra.schema.Keyspaces;
@@ -67,6 +68,9 @@
         if (!attrs.hasOption(Option.REPLICATION))
             throw ire("Missing mandatory option '%s'", Option.REPLICATION);
 
+        if (attrs.getReplicationStrategyClass() != null && attrs.getReplicationStrategyClass().equals(SimpleStrategy.class.getSimpleName()))
+            Guardrails.simpleStrategyEnabled.ensureEnabled("SimpleStrategy", state);
+
         if (schema.containsKeyspace(keyspaceName))
         {
             if (ifNotExists)
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java
index f2bd30b..47e514a 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/DropKeyspaceStatement.java
@@ -21,6 +21,7 @@
 import org.apache.cassandra.audit.AuditLogEntryType;
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.cql3.CQLStatement;
+import org.apache.cassandra.db.guardrails.Guardrails;
 import org.apache.cassandra.schema.Keyspaces;
 import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
 import org.apache.cassandra.service.ClientState;
@@ -39,6 +40,8 @@
 
     public Keyspaces apply(Keyspaces schema)
     {
+        Guardrails.dropKeyspaceEnabled.ensureEnabled(state);
+
         if (schema.containsKeyspace(keyspaceName))
             return schema.without(keyspaceName);
 
diff --git a/src/java/org/apache/cassandra/cql3/statements/schema/KeyspaceAttributes.java b/src/java/org/apache/cassandra/cql3/statements/schema/KeyspaceAttributes.java
index 42fcaf4..d4d5b98 100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/KeyspaceAttributes.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/KeyspaceAttributes.java
@@ -50,7 +50,7 @@
             throw new ConfigurationException("Missing replication strategy class");
     }
 
-    private String getReplicationStrategyClass()
+    public String getReplicationStrategyClass()
     {
         return getAllReplicationOptions().get(ReplicationParams.CLASS);
     }
diff --git a/src/java/org/apache/cassandra/db/BufferDecoratedKey.java b/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
index d375162..ae3e9d4 100644
--- a/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/BufferDecoratedKey.java
@@ -19,7 +19,9 @@
 
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
 
 public class BufferDecoratedKey extends DecoratedKey
 {
@@ -36,4 +38,28 @@
     {
         return key;
     }
+
+    /**
+     * A factory method that translates the given byte-comparable representation to a {@link BufferDecoratedKey}
+     * instance. If the given byte comparable doesn't represent the encoding of a buffer decorated key, anything from a
+     * wide variety of throwables may be thrown (e.g. {@link AssertionError}, {@link IndexOutOfBoundsException},
+     * {@link IllegalStateException}, etc.).
+     *
+     * @param byteComparable A byte-comparable representation (presumably of a {@link BufferDecoratedKey} instance).
+     * @param version The encoding version used for the given byte comparable.
+     * @param partitioner The partitioner of the encoded decorated key. Needed in order to correctly decode the token
+     *                    bytes of the key.
+     * @return A new {@link BufferDecoratedKey} instance, corresponding to the given byte-comparable representation. If
+     * we were to call {@link #asComparableBytes(Version)} on the returned object, we should get a {@link ByteSource}
+     * equal to the one of the input byte comparable.
+     */
+    public static BufferDecoratedKey fromByteComparable(ByteComparable byteComparable,
+                                                        Version version,
+                                                        IPartitioner partitioner)
+    {
+        return DecoratedKey.fromByteComparable(byteComparable,
+                                               version,
+                                               partitioner,
+                                               (token, keyBytes) -> new BufferDecoratedKey(token, ByteBuffer.wrap(keyBytes)));
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/ClusteringComparator.java b/src/java/org/apache/cassandra/db/ClusteringComparator.java
index fdc4508..c1aebfa 100644
--- a/src/java/org/apache/cassandra/db/ClusteringComparator.java
+++ b/src/java/org/apache/cassandra/db/ClusteringComparator.java
@@ -18,6 +18,7 @@
 package org.apache.cassandra.db;
 
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Objects;
@@ -31,6 +32,15 @@
 import org.apache.cassandra.serializers.MarshalException;
 
 import org.apache.cassandra.io.sstable.IndexInfo;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.EXCLUDED;
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.NEXT_COMPONENT;
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.NEXT_COMPONENT_EMPTY;
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.NEXT_COMPONENT_EMPTY_REVERSED;
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.NEXT_COMPONENT_NULL;
+import static org.apache.cassandra.utils.bytecomparable.ByteSource.TERMINATOR;
 
 /**
  * A comparator of clustering prefixes (or more generally of {@link Clusterable}}.
@@ -233,6 +243,267 @@
     }
 
     /**
+     * Produce a prefix-free byte-comparable representation of the given value, i.e. such a sequence of bytes that any
+     * pair x, y of valid values of this type
+     *   compare(x, y) == compareLexicographicallyUnsigned(asByteComparable(x), asByteComparable(y))
+     * and
+     *   asByteComparable(x) is not a prefix of asByteComparable(y)
+     */
+    public <V> ByteComparable asByteComparable(ClusteringPrefix<V> clustering)
+    {
+        return new ByteComparableClustering<>(clustering);
+    }
+
+    /**
+     * A prefix-free byte-comparable representation for a clustering or prefix.
+     *
+     * Adds a NEXT_COMPONENT byte before each component (allowing inclusive/exclusive bounds over incomplete prefixes
+     * of that length) and finishes with a suitable byte for the clustering kind. Also deals with null entries.
+     *
+     * Since all types' encodings are weakly prefix-free, this is guaranteed to be prefix-free as long as the
+     * bound/ClusteringPrefix terminators are different from the separator byte. It is okay for the terminator for
+     * Clustering to be the same as the separator, as all Clusterings must be completely specified.
+     *
+     * See also {@link AbstractType#asComparableBytes}.
+     *
+     * Some examples:
+     *    "A", 0005, Clustering     -> 40 4100 40 0005 40
+     *    "B", 0006, InclusiveEnd   -> 40 4200 40 0006 60
+     *    "A", ExclusiveStart       -> 40 4100 60
+     *    "", null, Clustering      -> 40 00 3F 40
+     *    "", 0000, Clustering      -> 40 00 40 0000 40
+     *    BOTTOM                    -> 20
+     */
+    private class ByteComparableClustering<V> implements ByteComparable
+    {
+        private final ClusteringPrefix<V> src;
+
+        ByteComparableClustering(ClusteringPrefix<V> src)
+        {
+            this.src = src;
+        }
+
+        @Override
+        public ByteSource asComparableBytes(Version version)
+        {
+            return new ByteSource()
+            {
+                private ByteSource current = null;
+                private int srcnum = -1;
+
+                @Override
+                public int next()
+                {
+                    if (current != null)
+                    {
+                        int b = current.next();
+                        if (b > END_OF_STREAM)
+                            return b;
+                        current = null;
+                    }
+
+                    int sz = src.size();
+                    if (srcnum == sz)
+                        return END_OF_STREAM;
+
+                    ++srcnum;
+                    if (srcnum == sz)
+                        return src.kind().asByteComparableValue(version);
+
+                    final V nextComponent = src.get(srcnum);
+                    // We can have a null as the clustering component (this is a relic of COMPACT STORAGE, but also
+                    // can appear in indexed partitions with no rows but static content),
+                    if (nextComponent == null)
+                    {
+                        if (version != Version.LEGACY)
+                            return NEXT_COMPONENT_NULL; // always sorts before non-nulls, including for reversed types
+                        else
+                        {
+                            // legacy version did not permit nulls in clustering keys and treated these as null values
+                            return subtype(srcnum).isReversed() ? NEXT_COMPONENT_EMPTY_REVERSED : NEXT_COMPONENT_EMPTY;
+                        }
+                    }
+
+                    current = subtype(srcnum).asComparableBytes(src.accessor(), nextComponent, version);
+                    // and also null values for some types (e.g. int, varint but not text) that are encoded as empty
+                    // buffers.
+                    if (current == null)
+                        return subtype(srcnum).isReversed() ? NEXT_COMPONENT_EMPTY_REVERSED : NEXT_COMPONENT_EMPTY;
+
+                    return NEXT_COMPONENT;
+                }
+            };
+        }
+
+        public String toString()
+        {
+            return src.clusteringString(subtypes());
+        }
+    }
+
+    /**
+     * Produces a clustering from the given byte-comparable value. The method will throw an exception if the value
+     * does not correctly encode a clustering of this type, including if it encodes a position before or after a
+     * clustering (i.e. a bound/boundary).
+     *
+     * @param accessor Accessor to use to construct components.
+     * @param comparable The clustering encoded as a byte-comparable sequence.
+     */
+    public <V> Clustering<V> clusteringFromByteComparable(ValueAccessor<V> accessor, ByteComparable comparable)
+    {
+        ByteComparable.Version version = ByteComparable.Version.OSS42;
+        ByteSource.Peekable orderedBytes = ByteSource.peekable(comparable.asComparableBytes(version));
+
+        // First check for special cases (partition key only, static clustering) that can do without buffers.
+        int sep = orderedBytes.next();
+        switch (sep)
+        {
+        case TERMINATOR:
+            assert size() == 0 : "Terminator should be after " + size() + " components, got 0";
+            return accessor.factory().clustering();
+        case EXCLUDED:
+            return accessor.factory().staticClustering();
+        default:
+            // continue with processing
+        }
+
+        int cc = 0;
+        V[] components = accessor.createArray(size());
+
+        while (true)
+        {
+            switch (sep)
+            {
+            case NEXT_COMPONENT_NULL:
+                components[cc] = null;
+                break;
+            case NEXT_COMPONENT_EMPTY:
+            case NEXT_COMPONENT_EMPTY_REVERSED:
+                components[cc] = subtype(cc).fromComparableBytes(accessor, null, version);
+                break;
+            case NEXT_COMPONENT:
+                // Decode the next component, consuming bytes from orderedBytes.
+                components[cc] = subtype(cc).fromComparableBytes(accessor, orderedBytes, version);
+                break;
+            case TERMINATOR:
+                assert cc == size() : "Terminator should be after " + size() + " components, got " + cc;
+                return accessor.factory().clustering(components);
+            case EXCLUDED:
+                throw new AssertionError("Unexpected static terminator after the first component");
+            default:
+                throw new AssertionError("Unexpected separator " + Integer.toHexString(sep) + " in Clustering encoding");
+            }
+            ++cc;
+            sep = orderedBytes.next();
+        }
+    }
+
+    /**
+     * Produces a clustering bound from the given byte-comparable value. The method will throw an exception if the value
+     * does not correctly encode a bound position of this type, including if it encodes an exact clustering.
+     *
+     * Note that the encoded clustering position cannot specify the type of bound (i.e. start/end/boundary) because to
+     * correctly compare clustering positions the encoding must be the same for the different types (e.g. the position
+     * for an exclusive end and an inclusive start is the same, before the exact clustering). The type must be supplied
+     * separately (in the bound... vs boundary... call and isEnd argument).
+     *
+     * @param accessor Accessor to use to construct components.
+     * @param comparable The clustering position encoded as a byte-comparable sequence.
+     * @param isEnd true if the bound marks the end of a range, false if it marks the start.
+     */
+    public <V> ClusteringBound<V> boundFromByteComparable(ValueAccessor<V> accessor,
+                                                          ByteComparable comparable,
+                                                          boolean isEnd)
+    {
+        ByteComparable.Version version = ByteComparable.Version.OSS42;
+        ByteSource.Peekable orderedBytes = ByteSource.peekable(comparable.asComparableBytes(version));
+
+        int sep = orderedBytes.next();
+        int cc = 0;
+        V[] components = accessor.createArray(size());
+
+        while (true)
+        {
+            switch (sep)
+            {
+            case NEXT_COMPONENT_NULL:
+                components[cc] = null;
+                break;
+            case NEXT_COMPONENT_EMPTY:
+            case NEXT_COMPONENT_EMPTY_REVERSED:
+                components[cc] = subtype(cc).fromComparableBytes(accessor, null, version);
+                break;
+            case NEXT_COMPONENT:
+                // Decode the next component, consuming bytes from orderedBytes.
+                components[cc] = subtype(cc).fromComparableBytes(accessor, orderedBytes, version);
+                break;
+            case ByteSource.LT_NEXT_COMPONENT:
+                return accessor.factory().bound(isEnd ? ClusteringPrefix.Kind.EXCL_END_BOUND
+                                                      : ClusteringPrefix.Kind.INCL_START_BOUND,
+                                                Arrays.copyOf(components, cc));
+            case ByteSource.GT_NEXT_COMPONENT:
+                return accessor.factory().bound(isEnd ? ClusteringPrefix.Kind.INCL_END_BOUND
+                                                      : ClusteringPrefix.Kind.EXCL_START_BOUND,
+                                                Arrays.copyOf(components, cc));
+            default:
+                throw new AssertionError("Unexpected separator " + Integer.toHexString(sep) + " in ClusteringBound encoding");
+            }
+            ++cc;
+            sep = orderedBytes.next();
+        }
+    }
+
+    /**
+     * Produces a clustering boundary from the given byte-comparable value. The method will throw an exception if the
+     * value does not correctly encode a bound position of this type, including if it encodes an exact clustering.
+     *
+     * Note that the encoded clustering position cannot specify the type of bound (i.e. start/end/boundary) because to
+     * correctly compare clustering positions the encoding must be the same for the different types (e.g. the position
+     * for an exclusive end and an inclusive start is the same, before the exact clustering). The type must be supplied
+     * separately (in the bound... vs boundary... call; boundaries carry both boundary kinds, so no isEnd is needed).
+     *
+     * @param accessor Accessor to use to construct components.
+     * @param comparable The clustering position encoded as a byte-comparable sequence.
+     */
+    public <V> ClusteringBoundary<V> boundaryFromByteComparable(ValueAccessor<V> accessor, ByteComparable comparable)
+    {
+        ByteComparable.Version version = ByteComparable.Version.OSS42;
+        ByteSource.Peekable orderedBytes = ByteSource.peekable(comparable.asComparableBytes(version));
+
+        int sep = orderedBytes.next();
+        int cc = 0;
+        V[] components = accessor.createArray(size());
+
+        while (true)
+        {
+            switch (sep)
+            {
+            case NEXT_COMPONENT_NULL:
+                components[cc] = null;
+                break;
+            case NEXT_COMPONENT_EMPTY:
+            case NEXT_COMPONENT_EMPTY_REVERSED:
+                components[cc] = subtype(cc).fromComparableBytes(accessor, null, version);
+                break;
+            case NEXT_COMPONENT:
+                // Decode the next component, consuming bytes from orderedBytes.
+                components[cc] = subtype(cc).fromComparableBytes(accessor, orderedBytes, version);
+                break;
+            case ByteSource.LT_NEXT_COMPONENT:
+                return accessor.factory().boundary(ClusteringPrefix.Kind.EXCL_END_INCL_START_BOUNDARY,
+                                                   Arrays.copyOf(components, cc));
+            case ByteSource.GT_NEXT_COMPONENT:
+                return accessor.factory().boundary(ClusteringPrefix.Kind.INCL_END_EXCL_START_BOUNDARY,
+                                                   Arrays.copyOf(components, cc));
+            default:
+                throw new AssertionError("Unexpected separator " + Integer.toHexString(sep) + " in ClusteringBoundary encoding");
+            }
+            ++cc;
+            sep = orderedBytes.next();
+        }
+    }
+
+    /**
      * A comparator for rows.
      *
      * A {@code Row} is a {@code Clusterable} so {@code ClusteringComparator} can be used
diff --git a/src/java/org/apache/cassandra/db/ClusteringPrefix.java b/src/java/org/apache/cassandra/db/ClusteringPrefix.java
index a1291c8..c7a2782 100644
--- a/src/java/org/apache/cassandra/db/ClusteringPrefix.java
+++ b/src/java/org/apache/cassandra/db/ClusteringPrefix.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.*;
+import java.util.function.ToIntFunction;
 
 import org.apache.cassandra.cache.IMeasurableMemory;
 import org.apache.cassandra.config.*;
@@ -34,6 +35,8 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteArrayUtil;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 /**
  * A clustering prefix is the unit of what a {@link ClusteringComparator} can compare.
@@ -62,14 +65,19 @@
     {
         // WARNING: the ordering of that enum matters because we use ordinal() in the serialization
 
-        EXCL_END_BOUND              (0, -1),
-        INCL_START_BOUND            (0, -1),
-        EXCL_END_INCL_START_BOUNDARY(0, -1),
-        STATIC_CLUSTERING           (1, -1),
-        CLUSTERING                  (2,  0),
-        INCL_END_EXCL_START_BOUNDARY(3,  1),
-        INCL_END_BOUND              (3,  1),
-        EXCL_START_BOUND            (3,  1);
+        EXCL_END_BOUND              (0, -1, v -> ByteSource.LT_NEXT_COMPONENT),
+        INCL_START_BOUND            (0, -1, v -> ByteSource.LT_NEXT_COMPONENT),
+        EXCL_END_INCL_START_BOUNDARY(0, -1, v -> ByteSource.LT_NEXT_COMPONENT),
+        STATIC_CLUSTERING           (1, -1, v -> v == Version.LEGACY
+                                                 ? ByteSource.LT_NEXT_COMPONENT + 1
+                                                 : ByteSource.EXCLUDED),
+        CLUSTERING                  (2,  0, v -> v == Version.LEGACY
+                                                 ? ByteSource.NEXT_COMPONENT
+                                                 : ByteSource.TERMINATOR),
+        INCL_END_EXCL_START_BOUNDARY(3,  1, v -> ByteSource.GT_NEXT_COMPONENT),
+        INCL_END_BOUND              (3,  1, v -> ByteSource.GT_NEXT_COMPONENT),
+        EXCL_START_BOUND            (3,  1, v -> ByteSource.GT_NEXT_COMPONENT);
+
 
         private final int comparison;
 
@@ -79,10 +87,13 @@
          */
         public final int comparedToClustering;
 
-        Kind(int comparison, int comparedToClustering)
+        public final ToIntFunction<Version> asByteComparable;
+
+        Kind(int comparison, int comparedToClustering, ToIntFunction<Version> asByteComparable)
         {
             this.comparison = comparison;
             this.comparedToClustering = comparedToClustering;
+            this.asByteComparable = asByteComparable;
         }
 
         /**
@@ -197,6 +208,16 @@
                  ? (this == INCL_END_EXCL_START_BOUNDARY ? INCL_END_BOUND : EXCL_END_BOUND)
                  : (this == INCL_END_EXCL_START_BOUNDARY ? EXCL_START_BOUND : INCL_START_BOUND);
         }
+
+        /*
+         * Returns a terminator value for this clustering type that is suitable for byte comparison.
+         * Inclusive starts / exclusive ends need a lower value than ByteSource.NEXT_COMPONENT and the clustering byte,
+         * exclusive starts / inclusive ends -- a higher.
+         */
+        public int asByteComparableValue(Version version)
+        {
+            return asByteComparable.applyAsInt(version);
+        }
     }
 
     default boolean isBottom()
@@ -308,6 +329,24 @@
             values[i] = accessor().toBuffer(get(i));
         return CompositeType.build(ByteBufferAccessor.instance, values);
     }
+
+    /**
+     * Produce a human-readable representation of the clustering given the list of types.
+     * Easier to access than metadata for debugging.
+     */
+    public default String clusteringString(List<AbstractType<?>> types)
+    {
+        StringBuilder sb = new StringBuilder();
+        sb.append(kind()).append('(');
+        for (int i = 0; i < size(); i++)
+        {
+            if (i > 0)
+                sb.append(", ");
+            sb.append(types.get(i).getString(get(i), accessor()));
+        }
+        return sb.append(')').toString();
+    }
+
     /**
      * The values of this prefix as an array.
      * <p>
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index a40e5c7..94ca180 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -22,7 +22,7 @@
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.nio.ByteBuffer;
-import java.nio.file.Files;
+import java.nio.file.Path;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -155,6 +155,7 @@
 import org.apache.cassandra.service.paxos.Ballot;
 import org.apache.cassandra.service.paxos.PaxosRepairHistory;
 import org.apache.cassandra.service.paxos.TablePaxosRepairHistory;
+import org.apache.cassandra.service.snapshot.SnapshotLoader;
 import org.apache.cassandra.service.snapshot.SnapshotManifest;
 import org.apache.cassandra.service.snapshot.TableSnapshot;
 import org.apache.cassandra.streaming.TableStreamManager;
@@ -1698,9 +1699,9 @@
     /**
      * Rewrites all SSTables according to specified parameters
      *
-     * @param skipIfCurrentVersion - if {@link true}, will rewrite only SSTables that have version older than the current one ({@link BigFormat#latestVersion})
+     * @param skipIfCurrentVersion - if {@code true}, will rewrite only SSTables that have version older than the current one ({@link org.apache.cassandra.io.sstable.format.big.BigFormat#latestVersion})
      * @param skipIfNewerThanTimestamp - max timestamp (local creation time) for SSTable; SSTables created _after_ this timestamp will be excluded from compaction
-     * @param skipIfCompressionMatches - if {@link true}, will rewrite only SSTables whose compression parameters are different from {@link CFMetaData#compressionParams()}
+     * @param skipIfCompressionMatches - if {@code true}, will rewrite only SSTables whose compression parameters are different from the table's current compression parameters ({@code TableMetadata#params})
      * @param jobs number of jobs for parallel execution
      */
     public CompactionManager.AllSSTableOpStatus sstablesRewrite(final boolean skipIfCurrentVersion,
@@ -2039,7 +2040,7 @@
                                          .collect(Collectors.toCollection(HashSet::new));
 
         // Create and write snapshot manifest
-        SnapshotManifest manifest = new SnapshotManifest(mapToDataFilenames(sstables), ttl, creationTime);
+        SnapshotManifest manifest = new SnapshotManifest(mapToDataFilenames(sstables), ttl, creationTime, ephemeral);
         File manifestFile = getDirectories().getSnapshotManifestFile(tag);
         writeSnapshotManifest(manifest, manifestFile);
         snapshotDirs.add(manifestFile.parent().toAbsolute()); // manifest may create empty snapshot dir
@@ -2052,16 +2053,9 @@
             snapshotDirs.add(schemaFile.parent().toAbsolute()); // schema may create empty snapshot dir
         }
 
-        // Maybe create ephemeral marker
-        if (ephemeral)
-        {
-            File ephemeralSnapshotMarker = getDirectories().getNewEphemeralSnapshotMarkerFile(tag);
-            createEphemeralSnapshotMarkerFile(tag, ephemeralSnapshotMarker);
-            snapshotDirs.add(ephemeralSnapshotMarker.parent().toAbsolute()); // marker may create empty snapshot dir
-        }
-
-        TableSnapshot snapshot = new TableSnapshot(metadata.keyspace, metadata.name, metadata.id.asUUID(), tag,
-                                                   manifest.createdAt, manifest.expiresAt, snapshotDirs);
+        TableSnapshot snapshot = new TableSnapshot(metadata.keyspace, metadata.name, metadata.id.asUUID(),
+                                                   tag, manifest.createdAt, manifest.expiresAt, snapshotDirs,
+                                                   manifest.ephemeral);
 
         StorageService.instance.addSnapshot(snapshot);
         return snapshot;
@@ -2106,34 +2100,19 @@
         }
     }
 
-    private void createEphemeralSnapshotMarkerFile(final String snapshot, File ephemeralSnapshotMarker)
-    {
-        try
-        {
-            if (!ephemeralSnapshotMarker.parent().exists())
-                ephemeralSnapshotMarker.parent().tryCreateDirectories();
-
-            Files.createFile(ephemeralSnapshotMarker.toPath());
-            if (logger.isTraceEnabled())
-                logger.trace("Created ephemeral snapshot marker file on {}.", ephemeralSnapshotMarker.absolutePath());
-        }
-        catch (IOException e)
-        {
-            logger.warn(String.format("Could not create marker file %s for ephemeral snapshot %s. " +
-                                      "In case there is a failure in the operation that created " +
-                                      "this snapshot, you may need to clean it manually afterwards.",
-                                      ephemeralSnapshotMarker.absolutePath(), snapshot), e);
-        }
-    }
-
     protected static void clearEphemeralSnapshots(Directories directories)
     {
         RateLimiter clearSnapshotRateLimiter = DatabaseDescriptor.getSnapshotRateLimiter();
 
-        for (String ephemeralSnapshot : directories.listEphemeralSnapshots())
+        List<TableSnapshot> ephemeralSnapshots = new SnapshotLoader(directories).loadSnapshots()
+                                                                                .stream()
+                                                                                .filter(TableSnapshot::isEphemeral)
+                                                                                .collect(Collectors.toList());
+
+        for (TableSnapshot ephemeralSnapshot : ephemeralSnapshots)
         {
-            logger.trace("Clearing ephemeral snapshot {} leftover from previous session.", ephemeralSnapshot);
-            Directories.clearSnapshot(ephemeralSnapshot, directories.getCFDirectories(), clearSnapshotRateLimiter);
+            logger.trace("Clearing ephemeral snapshot {} leftover from previous session.", ephemeralSnapshot.getId());
+            Directories.clearSnapshot(ephemeralSnapshot.getTag(), directories.getCFDirectories(), clearSnapshotRateLimiter);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/db/DataRange.java b/src/java/org/apache/cassandra/db/DataRange.java
index 52162be..9912ac5 100644
--- a/src/java/org/apache/cassandra/db/DataRange.java
+++ b/src/java/org/apache/cassandra/db/DataRange.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
 
 /**
  * Groups both the range of partitions to query, and the clustering index filter to
@@ -139,6 +140,34 @@
     }
 
     /**
+     * The start of the partition key range queried by this {@code DataRange}.
+     *
+     * @return the start of the partition key range expressed as a ByteComparable.
+     */
+    public ByteComparable startAsByteComparable()
+    {
+        PartitionPosition bound = keyRange.left;
+        if (bound.isMinimum())
+            return null;
+
+        return bound.asComparableBound(keyRange.inclusiveLeft());
+    }
+
+    /**
+     * The end of the partition key range queried by this {@code DataRange}.
+     *
+     * @return the end of the partition key range expressed as a ByteComparable.
+     */
+    public ByteComparable stopAsByteComparable()
+    {
+        PartitionPosition bound = keyRange.right;
+        if (bound.isMinimum())
+            return null;
+
+        return bound.asComparableBound(!keyRange.inclusiveRight());
+    }
+
+    /**
      * Whether the underlying clustering index filter is a names filter or not.
      *
      * @return Whether the underlying clustering index filter is a names filter or not.
diff --git a/src/java/org/apache/cassandra/db/DecoratedKey.java b/src/java/org/apache/cassandra/db/DecoratedKey.java
index 4dd87d0..569c86d 100644
--- a/src/java/org/apache/cassandra/db/DecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/DecoratedKey.java
@@ -21,6 +21,7 @@
 import java.util.Comparator;
 import java.util.List;
 import java.util.StringJoiner;
+import java.util.function.BiFunction;
 
 import org.apache.cassandra.db.marshal.CompositeType;
 import org.apache.cassandra.dht.IPartitioner;
@@ -29,8 +30,11 @@
 import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.MurmurHash;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.IFilter.FilterKey;
+import org.apache.cassandra.utils.MurmurHash;
 
 /**
  * Represents a decorated key, handy for certain operations
@@ -102,6 +106,37 @@
         return cmp == 0 ? ByteBufferUtil.compareUnsigned(key, otherKey.getKey()) : cmp;
     }
 
+    @Override
+    public ByteSource asComparableBytes(Version version)
+    {
+        // Note: In the legacy version one encoding could be a prefix of another as the escaping is only weakly
+        // prefix-free (see ByteSourceTest.testDecoratedKeyPrefixes()).
+        // The OSS42 version avoids this by adding a terminator.
+        return ByteSource.withTerminatorMaybeLegacy(version,
+                                                    ByteSource.END_OF_STREAM,
+                                                    token.asComparableBytes(version),
+                                                    keyComparableBytes(version));
+    }
+
+    @Override
+    public ByteComparable asComparableBound(boolean before)
+    {
+        return version ->
+        {
+            assert (version != Version.LEGACY) : "Decorated key bounds are not supported by the legacy encoding.";
+
+            return ByteSource.withTerminator(
+                    before ? ByteSource.LT_NEXT_COMPONENT : ByteSource.GT_NEXT_COMPONENT,
+                    token.asComparableBytes(version),
+                    keyComparableBytes(version));
+        };
+    }
+
+    protected ByteSource keyComparableBytes(Version version)
+    {
+        return ByteSource.of(getKey(), version);
+    }
+
     public IPartitioner getPartitioner()
     {
         return getToken().getPartitioner();
@@ -169,4 +204,39 @@
         ByteBuffer key = getKey();
         MurmurHash.hash3_x64_128(key, key.position(), key.remaining(), 0, dest);
     }
+
+    /**
+     * A template factory method for creating decorated keys from their byte-comparable representation.
+     */
+    static <T extends DecoratedKey> T fromByteComparable(ByteComparable byteComparable,
+                                                         Version version,
+                                                         IPartitioner partitioner,
+                                                         BiFunction<Token, byte[], T> decoratedKeyFactory)
+    {
+        ByteSource.Peekable peekable = ByteSource.peekable(byteComparable.asComparableBytes(version));
+        // Decode the token from the first component of the multi-component sequence representing the whole decorated key.
+        Token token = partitioner.getTokenFactory().fromComparableBytes(ByteSourceInverse.nextComponentSource(peekable), version);
+        // Decode the key bytes from the second component.
+        byte[] keyBytes = ByteSourceInverse.getUnescapedBytes(ByteSourceInverse.nextComponentSource(peekable));
+        // Consume the terminator byte.
+        int terminator = peekable.next();
+        assert terminator == ByteSource.TERMINATOR : "Decorated key encoding must end in terminator.";
+        // Instantiate a decorated key from the decoded token and key bytes, using the provided factory method.
+        return decoratedKeyFactory.apply(token, keyBytes);
+    }
+
+    public static byte[] keyFromByteSource(ByteSource.Peekable peekableByteSource,
+                                           Version version,
+                                           IPartitioner partitioner)
+    {
+        assert version != Version.LEGACY;   // reverse translation is not supported for LEGACY version.
+        // Decode the token from the first component of the multi-component sequence representing the whole decorated key.
+        // We won't use it, but the decoding also positions the byte source after it.
+        partitioner.getTokenFactory().fromComparableBytes(ByteSourceInverse.nextComponentSource(peekableByteSource), version);
+        // Decode the key bytes from the second component.
+        byte[] keyBytes = ByteSourceInverse.getUnescapedBytes(ByteSourceInverse.nextComponentSource(peekableByteSource));
+        int terminator = peekableByteSource.next();
+        assert terminator == ByteSource.TERMINATOR : "Decorated key encoding must end in terminator.";
+        return keyBytes;
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index 079b294..e062682 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -572,17 +572,6 @@
         return new File(snapshotDir, "schema.cql");
     }
 
-    public File getNewEphemeralSnapshotMarkerFile(String snapshotName)
-    {
-        File snapshotDir = new File(getWriteableLocationAsFile(1L), join(SNAPSHOT_SUBDIR, snapshotName));
-        return getEphemeralSnapshotMarkerFile(snapshotDir);
-    }
-
-    private static File getEphemeralSnapshotMarkerFile(File snapshotDirectory)
-    {
-        return new File(snapshotDirectory, "ephemeral.snapshot");
-    }
-
     public static File getBackupsDirectory(Descriptor desc)
     {
         return getBackupsDirectory(desc.directory);
@@ -983,18 +972,25 @@
         return snapshots;
     }
 
-    protected TableSnapshot buildSnapshot(String tag, SnapshotManifest manifest, Set<File> snapshotDirs) {
+    private TableSnapshot buildSnapshot(String tag, SnapshotManifest manifest, Set<File> snapshotDirs)
+    {
+        boolean ephemeral = manifest != null ? manifest.isEphemeral() : isLegacyEphemeralSnapshot(snapshotDirs);
         Instant createdAt = manifest == null ? null : manifest.createdAt;
         Instant expiresAt = manifest == null ? null : manifest.expiresAt;
         return new TableSnapshot(metadata.keyspace, metadata.name, metadata.id.asUUID(), tag, createdAt, expiresAt,
-                                 snapshotDirs);
+                                 snapshotDirs, ephemeral);
+    }
+
+    private static boolean isLegacyEphemeralSnapshot(Set<File> snapshotDirs)
+    {
+        return snapshotDirs.stream().map(d -> new File(d, "ephemeral.snapshot")).anyMatch(File::exists);
     }
 
     @VisibleForTesting
     protected static SnapshotManifest maybeLoadManifest(String keyspace, String table, String tag, Set<File> snapshotDirs)
     {
         List<File> manifests = snapshotDirs.stream().map(d -> new File(d, "manifest.json"))
-                                           .filter(d -> d.exists()).collect(Collectors.toList());
+                                           .filter(File::exists).collect(Collectors.toList());
 
         if (manifests.isEmpty())
         {
@@ -1018,42 +1014,6 @@
         return null;
     }
 
-    public List<String> listEphemeralSnapshots()
-    {
-        final List<String> ephemeralSnapshots = new LinkedList<>();
-        for (File snapshot : listAllSnapshots())
-        {
-            if (getEphemeralSnapshotMarkerFile(snapshot).exists())
-                ephemeralSnapshots.add(snapshot.name());
-        }
-        return ephemeralSnapshots;
-    }
-
-    private List<File> listAllSnapshots()
-    {
-        final List<File> snapshots = new LinkedList<>();
-        for (final File dir : dataPaths)
-        {
-            File snapshotDir = isSecondaryIndexFolder(dir)
-                               ? new File(dir.parentPath(), SNAPSHOT_SUBDIR)
-                               : new File(dir, SNAPSHOT_SUBDIR);
-            if (snapshotDir.exists() && snapshotDir.isDirectory())
-            {
-                final File[] snapshotDirs  = snapshotDir.tryList();
-                if (snapshotDirs != null)
-                {
-                    for (final File snapshot : snapshotDirs)
-                    {
-                        if (snapshot.isDirectory())
-                            snapshots.add(snapshot);
-                    }
-                }
-            }
-        }
-
-        return snapshots;
-    }
-
     @VisibleForTesting
     protected Map<String, Set<File>> listSnapshotDirsByTag()
     {
diff --git a/src/java/org/apache/cassandra/db/NativeDecoratedKey.java b/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
index add5218..e9a564a 100644
--- a/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
+++ b/src/java/org/apache/cassandra/db/NativeDecoratedKey.java
@@ -20,7 +20,11 @@
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.concurrent.OpOrder;
 import org.apache.cassandra.utils.memory.MemoryUtil;
 import org.apache.cassandra.utils.memory.NativeAllocator;
@@ -41,8 +45,38 @@
         MemoryUtil.setBytes(peer + 4, key);
     }
 
+    public NativeDecoratedKey(Token token, NativeAllocator allocator, OpOrder.Group writeOp, byte[] keyBytes)
+    {
+        super(token);
+        assert keyBytes != null;
+
+        int size = keyBytes.length;
+        this.peer = allocator.allocate(4 + size, writeOp);
+        MemoryUtil.setInt(peer, size);
+        MemoryUtil.setBytes(peer + 4, keyBytes, 0, size);
+    }
+
+    @Inline
+    int length()
+    {
+        return MemoryUtil.getInt(peer);
+    }
+
+    @Inline
+    long address()
+    {
+        return this.peer + 4;
+    }
+
+    @Override
     public ByteBuffer getKey()
     {
-        return MemoryUtil.getByteBuffer(peer + 4, MemoryUtil.getInt(peer), ByteOrder.BIG_ENDIAN);
+        return MemoryUtil.getByteBuffer(address(), length(), ByteOrder.BIG_ENDIAN);
+    }
+
+    @Override
+    protected ByteSource keyComparableBytes(Version version)
+    {
+        return ByteSource.ofMemory(address(), length(), version);
     }
 }
diff --git a/src/java/org/apache/cassandra/db/PartitionPosition.java b/src/java/org/apache/cassandra/db/PartitionPosition.java
index 3b45c6c..5e1d618 100644
--- a/src/java/org/apache/cassandra/db/PartitionPosition.java
+++ b/src/java/org/apache/cassandra/db/PartitionPosition.java
@@ -24,8 +24,10 @@
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
-public interface PartitionPosition extends RingPosition<PartitionPosition>
+public interface PartitionPosition extends RingPosition<PartitionPosition>, ByteComparable
 {
     public static enum Kind
     {
@@ -54,6 +56,27 @@
     public Kind kind();
     public boolean isMinimum();
 
+    /**
+     * Produce a prefix-free byte-comparable representation of the key, i.e. such a sequence of bytes that any pair x, y
+     * of valid positions (with the same key column types and partitioner),
+     *   x.compareTo(y) == compareLexicographicallyUnsigned(x.asComparableBytes(), y.asComparableBytes())
+     * and
+     *   x.asComparableBytes() is not a prefix of y.asComparableBytes()
+     *
+     * We use a two-component tuple for decorated keys, and a one-component tuple for key bounds, where the terminator
+     * byte is chosen to yield the correct comparison result. No decorated key can be a prefix of another (per the tuple
+     * encoding), and no key bound can be a prefix of one because it uses a terminator byte that is different from the
+     * tuple separator.
+     */
+    public abstract ByteSource asComparableBytes(Version version);
+
+    /**
+     * Produce a byte-comparable representation for the position before or after the key.
+     * This does nothing for token boundaries (which are already at a position between valid keys), and changes
+     * the terminator byte for keys.
+     */
+    public abstract ByteComparable asComparableBound(boolean before);
+
     public static class RowPositionSerializer implements IPartitionerDependentSerializer<PartitionPosition>
     {
         /*
diff --git a/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java b/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
index 4e6ab11..c091ebd 100644
--- a/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
+++ b/src/java/org/apache/cassandra/db/SnapshotDetailsTabularData.java
@@ -32,7 +32,8 @@
             "True size",
             "Size on disk",
             "Creation time",
-            "Expiration time",};
+            "Expiration time",
+            "Ephemeral"};
 
     private static final String[] ITEM_DESCS = new String[]{"snapshot_name",
             "keyspace_name",
@@ -40,7 +41,8 @@
             "TrueDiskSpaceUsed",
             "TotalDiskSpaceUsed",
             "created_at",
-            "expires_at",};
+            "expires_at",
+            "ephemeral"};
 
     private static final String TYPE_NAME = "SnapshotDetails";
 
@@ -56,7 +58,7 @@
     {
         try
         {
-            ITEM_TYPES = new OpenType[]{ SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
+            ITEM_TYPES = new OpenType[]{ SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
 
             COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);
 
@@ -77,8 +79,9 @@
             final String liveSize =  FileUtils.stringifyFileSize(details.computeTrueSizeBytes());
             String createdAt = safeToString(details.getCreatedAt());
             String expiresAt = safeToString(details.getExpiresAt());
+            String ephemeral = Boolean.toString(details.isEphemeral());
             result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
-                    new Object[]{ details.getTag(), details.getKeyspaceName(), details.getTableName(), liveSize, totalSize, createdAt, expiresAt }));
+                    new Object[]{ details.getTag(), details.getKeyspaceName(), details.getTableName(), liveSize, totalSize, createdAt, expiresAt, ephemeral }));
         }
         catch (OpenDataException e)
         {
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index 6fbbc3e..a1013e7 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -65,6 +65,7 @@
 import org.apache.cassandra.cql3.statements.schema.CreateTableStatement;
 import org.apache.cassandra.db.commitlog.CommitLogPosition;
 import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
@@ -1875,7 +1876,7 @@
             TupleType tupleType = new TupleType(Lists.newArrayList(UTF8Type.instance, LongType.instance));
             for (ByteBuffer bb : top)
             {
-                ByteBuffer[] components = tupleType.split(bb);
+                ByteBuffer[] components = tupleType.split(ByteBufferAccessor.instance, bb);
                 String keyStr = UTF8Type.instance.compose(components[0]);
                 long value = LongType.instance.compose(components[1]);
                 topPartitions.add(new TopPartitionTracker.TopPartition(metadata.partitioner.decorateKey(metadata.partitionKeyType.fromString(keyStr)), value));
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
index d4362f7..ddcbcf9 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
@@ -48,6 +48,7 @@
         super(sstable, file, key, indexEntry, slices, columns, ifile);
     }
 
+    @SuppressWarnings("resource") // caller to close
     protected Reader createReaderInternal(RowIndexEntry indexEntry, FileDataInput file, boolean shouldCloseFile)
     {
         return indexEntry.isIndexed()
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
index a60aafa..37db6d9 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
@@ -52,6 +52,7 @@
         super(sstable, file, key, indexEntry, slices, columns, ifile);
     }
 
+    @SuppressWarnings("resource") // caller to close
     protected Reader createReaderInternal(RowIndexEntry indexEntry, FileDataInput file, boolean shouldCloseFile)
     {
         return indexEntry.isIndexed()
diff --git a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
index d8eb0e7..e6cc2fa 100644
--- a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
+++ b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java
@@ -195,7 +195,7 @@
                     interrupted = true;
                 }
             }
-            
+
             if (interrupted)
             {
                 discardAvailableSegment();
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
index a832b5e..eb94519 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
@@ -430,11 +430,7 @@
     @Override
     public void setCDCBlockWrites(boolean val)
     {
-        Preconditions.checkState(DatabaseDescriptor.isCDCEnabled(),
-                                 "Unable to set block_writes (%s): CDC is not enabled.", val);
-        Preconditions.checkState(segmentManager instanceof CommitLogSegmentManagerCDC,
-                                 "CDC is enabled but we have the wrong CommitLogSegmentManager type: %s. " +
-                                 "Please report this as bug.", segmentManager.getClass().getName());
+        ensureCDCEnabled("Unable to set block_writes.");
         boolean oldVal = DatabaseDescriptor.getCDCBlockWrites();
         CommitLogSegment currentSegment = segmentManager.allocatingFrom();
         // Update the current segment CDC state to PERMITTED if block_writes is disabled now, and it was in FORBIDDEN state
@@ -444,6 +440,29 @@
         logger.info("Updated CDC block_writes from {} to {}", oldVal, val);
     }
 
+
+    @Override
+    public boolean isCDCOnRepairEnabled()
+    {
+        return DatabaseDescriptor.isCDCOnRepairEnabled();
+    }
+
+    @Override
+    public void setCDCOnRepairEnabled(boolean value)
+    {
+        ensureCDCEnabled("Unable to set cdc_on_repair_enabled.");
+        DatabaseDescriptor.setCDCOnRepairEnabled(value);
+        logger.info("Set cdc_on_repair_enabled to {}", value);
+    }
+
+    private void ensureCDCEnabled(String hint)
+    {
+        Preconditions.checkState(DatabaseDescriptor.isCDCEnabled(), "CDC is not enabled. %s", hint);
+        Preconditions.checkState(segmentManager instanceof CommitLogSegmentManagerCDC,
+                                 "CDC is enabled but we have the wrong CommitLogSegmentManager type: %s. " +
+                                 "Please report this as bug.", segmentManager.getClass().getName());
+    }
+
     /**
      * Shuts down the threads used by the commit log, blocking until completion.
      * TODO this should accept a timeout, and throw TimeoutException
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
index 7e8deca..189916c 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java
@@ -88,4 +88,10 @@
     public boolean getCDCBlockWrites();
 
     public void setCDCBlockWrites(boolean val);
+
+    /** Returns true if internode streaming of CDC data should go through the write path */
+    boolean isCDCOnRepairEnabled();
+
+    /** Sets whether to enable the write path for CDC data during internode streaming, e.g. repair */
+    void setCDCOnRepairEnabled(boolean value);
 }
diff --git a/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java b/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java
deleted file mode 100644
index 9ec1951..0000000
--- a/src/java/org/apache/cassandra/db/guardrails/DisableFlag.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.db.guardrails;
-
-import java.util.function.Predicate;
-import javax.annotation.Nullable;
-
-import org.apache.cassandra.service.ClientState;
-
-/**
- * A guardrail that completely disables the use of a particular feature.
- *
- * <p>Note that this guardrail only aborts operations (if the feature is disabled) so is only meant for
- * query-based guardrails (we're happy to reject queries deemed dangerous, but we don't want to create a guardrail
- * that breaks compaction for instance).
- */
-public class DisableFlag extends Guardrail
-{
-    private final Predicate<ClientState> disabled;
-    private final String what;
-
-    /**
-     * Creates a new {@link DisableFlag} guardrail.
-     *
-     * @param name     the identifying name of the guardrail
-     * @param disabled a {@link ClientState}-based supplier of boolean indicating whether the feature guarded by this
-     *                 guardrail must be disabled.
-     * @param what     The feature that is guarded by this guardrail (for reporting in error messages),
-     *                 {@link DisableFlag#ensureEnabled(String, ClientState)} can specify a different {@code what}.
-     */
-    public DisableFlag(String name, Predicate<ClientState> disabled, String what)
-    {
-        super(name);
-        this.disabled = disabled;
-        this.what = what;
-    }
-
-    /**
-     * Aborts the operation if this guardrail is disabled.
-     *
-     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
-     * allowed.
-     *
-     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
-     *              A {@code null} value means that the check should be done regardless of the query.
-     */
-    public void ensureEnabled(@Nullable ClientState state)
-    {
-        ensureEnabled(what, state);
-    }
-
-    /**
-     * Aborts the operation if this guardrail is disabled.
-     *
-     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
-     * allowed.
-     *
-     * @param what  The feature that is guarded by this guardrail (for reporting in error messages).
-     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
-     *              A {@code null} value means that the check should be done regardless of the query, although it won't
-     *              throw any exception if the failure threshold is exceeded. This is so because checks without an
-     *              associated client come from asynchronous processes such as compaction, and we don't want to
-     *              interrupt such processes.
-     */
-    public void ensureEnabled(String what, @Nullable ClientState state)
-    {
-        if (enabled(state) && disabled.test(state))
-            fail(what + " is not allowed", state);
-    }
-}
diff --git a/src/java/org/apache/cassandra/db/guardrails/EnableFlag.java b/src/java/org/apache/cassandra/db/guardrails/EnableFlag.java
new file mode 100644
index 0000000..aba013a
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/guardrails/EnableFlag.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.function.Predicate;
+import javax.annotation.Nullable;
+
+import org.apache.cassandra.service.ClientState;
+
+/**
+ * A guardrail that enables the use of a particular feature.
+ *
+ * <p>Note that this guardrail only aborts operations (if the feature is not enabled) so is only meant for query-based
+ * guardrails (we're happy to reject queries deemed dangerous, but we don't want to create a guardrail that breaks
+ * compaction for instance).
+ */
+public class EnableFlag extends Guardrail
+{
+    private final Predicate<ClientState> enabled;
+    private final String featureName;
+
+    /**
+     * Creates a new {@link EnableFlag} guardrail.
+     *
+     * @param name        the identifying name of the guardrail
+     * @param enabled     a {@link ClientState}-based supplier of boolean indicating whether the feature guarded by this
+     *                    guardrail is enabled.
+     * @param featureName The feature that is guarded by this guardrail (for reporting in error messages), {@link
+     *                    EnableFlag#ensureEnabled(String, ClientState)} can specify a different {@code featureName}.
+     */
+    public EnableFlag(String name, Predicate<ClientState> enabled, String featureName)
+    {
+        super(name);
+        this.enabled = enabled;
+        this.featureName = featureName;
+    }
+
+    /**
+     * Aborts the operation if this guardrail is not enabled.
+     *
+     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
+     * allowed.
+     *
+     * @param state The client state, used to skip the check if the query is internal or is done by a superuser.
+     *              A {@code null} value means that the check should be done regardless of the query.
+     */
+    public void ensureEnabled(@Nullable ClientState state)
+    {
+        ensureEnabled(featureName, state);
+    }
+
+    /**
+     * Aborts the operation if this guardrail is not enabled.
+     *
+     * <p>This must be called when the feature guarded by this guardrail is used to ensure such use is in fact
+     * allowed.
+     *
+     * @param featureName The feature that is guarded by this guardrail (for reporting in error messages).
+     * @param state       The client state, used to skip the check if the query is internal or is done by a superuser. A
+     *                    {@code null} value means that the check should be done regardless of the query, although it
+     *                    won't throw any exception if the failure threshold is exceeded. This is so because checks
+     *                    without an associated client come from asynchronous processes such as compaction, and we don't
+     *                    want to interrupt such processes.
+     */
+    public void ensureEnabled(String featureName, @Nullable ClientState state)
+    {
+        if (enabled(state) && !enabled.test(state))
+            fail(featureName + " is not allowed", state);
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/guardrails/Guardrails.java b/src/java/org/apache/cassandra/db/guardrails/Guardrails.java
index 9d08ab0..1381655 100644
--- a/src/java/org/apache/cassandra/db/guardrails/Guardrails.java
+++ b/src/java/org/apache/cassandra/db/guardrails/Guardrails.java
@@ -104,10 +104,10 @@
     /**
      * Guardrail disabling user's ability to create secondary indexes
      */
-    public static final DisableFlag createSecondaryIndexesEnabled =
-    new DisableFlag("secondary_indexes",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getSecondaryIndexesEnabled(),
-                    "User creation of secondary indexes");
+    public static final EnableFlag createSecondaryIndexesEnabled =
+    new EnableFlag("secondary_indexes",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getSecondaryIndexesEnabled(),
+                   "User creation of secondary indexes");
 
     /**
      * Guardrail on the number of materialized views per table.
@@ -135,36 +135,55 @@
     /**
      * Guardrail disabling user-provided timestamps.
      */
-    public static final DisableFlag userTimestampsEnabled =
-    new DisableFlag("user_timestamps",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getUserTimestampsEnabled(),
-                    "User provided timestamps (USING TIMESTAMP)");
+    public static final EnableFlag userTimestampsEnabled =
+    new EnableFlag("user_timestamps",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getUserTimestampsEnabled(),
+                   "User provided timestamps (USING TIMESTAMP)");
 
-    public static final DisableFlag groupByEnabled =
-    new DisableFlag("group_by",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getGroupByEnabled(),
-                    "GROUP BY functionality");
+    public static final EnableFlag groupByEnabled =
+    new EnableFlag("group_by",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getGroupByEnabled(),
+                   "GROUP BY functionality");
 
-    public static final DisableFlag dropTruncateTableEnabled =
-    new DisableFlag("drop_truncate_table_enabled",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getDropTruncateTableEnabled(),
-                    "DROP and TRUNCATE TABLE functionality");
+    /**
+     * Guardrail disabling ALTER TABLE column mutation access.
+     */
+    public static final EnableFlag alterTableEnabled =
+    new EnableFlag("alter_table",
+                    state -> CONFIG_PROVIDER.getOrCreate(state).getAlterTableEnabled(),
+                    "User access to ALTER TABLE statement for column mutation");
+
+    /**
+     * Guardrail disabling DROP / TRUNCATE TABLE behavior
+     */
+    public static final EnableFlag dropTruncateTableEnabled =
+    new EnableFlag("drop_truncate_table_enabled",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getDropTruncateTableEnabled(),
+                   "DROP and TRUNCATE TABLE functionality");
+
+    /**
+     * Guardrail disabling DROP KEYSPACE behavior
+     */
+    public static final EnableFlag dropKeyspaceEnabled =
+    new EnableFlag("drop_keyspace_enabled",
+                    state -> CONFIG_PROVIDER.getOrCreate(state).getDropKeyspaceEnabled(),
+                    "DROP KEYSPACE functionality");
 
     /**
      * Guardrail disabling user's ability to turn off compression
      */
-    public static final DisableFlag uncompressedTablesEnabled =
-    new DisableFlag("uncompressed_tables_enabled",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getUncompressedTablesEnabled(),
-                    "Uncompressed table");
+    public static final EnableFlag uncompressedTablesEnabled =
+    new EnableFlag("uncompressed_tables_enabled",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getUncompressedTablesEnabled(),
+                   "Uncompressed table");
 
     /**
      * Guardrail disabling the creation of new COMPACT STORAGE tables
      */
-    public static final DisableFlag compactTablesEnabled =
-    new DisableFlag("compact_tables",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getCompactTablesEnabled(),
-                    "Creation of new COMPACT STORAGE tables");
+    public static final EnableFlag compactTablesEnabled =
+    new EnableFlag("compact_tables",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getCompactTablesEnabled(),
+                   "Creation of new COMPACT STORAGE tables");
 
     /**
      * Guardrail on the number of elements returned within page.
@@ -197,18 +216,26 @@
     /**
      * Guardrail disabling operations on lists that require read before write.
      */
-    public static final DisableFlag readBeforeWriteListOperationsEnabled =
-    new DisableFlag("read_before_write_list_operations",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getReadBeforeWriteListOperationsEnabled(),
-                    "List operation requiring read before write");
+    public static final EnableFlag readBeforeWriteListOperationsEnabled =
+    new EnableFlag("read_before_write_list_operations",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getReadBeforeWriteListOperationsEnabled(),
+                   "List operation requiring read before write");
 
     /**
      * Guardrail disabling ALLOW FILTERING statement within a query
      */
-    public static final DisableFlag allowFilteringEnabled =
-    new DisableFlag("allow_filtering",
-                    state -> !CONFIG_PROVIDER.getOrCreate(state).getAllowFilteringEnabled(),
-                    "Querying with ALLOW FILTERING");
+    public static final EnableFlag allowFilteringEnabled =
+    new EnableFlag("allow_filtering",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getAllowFilteringEnabled(),
+                   "Querying with ALLOW FILTERING");
+
+    /**
+     * Guardrail disabling setting SimpleStrategy via keyspace creation or alteration
+     */
+    public static final EnableFlag simpleStrategyEnabled =
+    new EnableFlag("simplestrategy",
+                   state -> CONFIG_PROVIDER.getOrCreate(state).getSimpleStrategyEnabled(),
+                   "SimpleStrategy");
 
     /**
      * Guardrail on the number of restrictions created by a cartesian product of a CQL's {@code IN} query.
@@ -328,10 +355,19 @@
                      state -> CONFIG_PROVIDER.getOrCreate(state).getMinimumReplicationFactorWarnThreshold(),
                      state -> CONFIG_PROVIDER.getOrCreate(state).getMinimumReplicationFactorFailThreshold(),
                      (isWarning, what, value, threshold) ->
-                     isWarning ? format("The keyspace %s has a replication factor of %s, below the warning threshold of %s.",
-                                        what, value, threshold)
-                               : format("The keyspace %s has a replication factor of %s, below the failure threshold of %s.",
-                                        what, value, threshold));
+                     format("The keyspace %s has a replication factor of %s, below the %s threshold of %s.",
+                            what, value, isWarning ? "warning" : "failure", threshold));
+
+    /**
+     * Guardrail on the maximum replication factor.
+     */
+    public static final MaxThreshold maximumReplicationFactor =
+    new MaxThreshold("maximum_replication_factor",
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMaximumReplicationFactorWarnThreshold(),
+                     state -> CONFIG_PROVIDER.getOrCreate(state).getMaximumReplicationFactorFailThreshold(),
+                     (isWarning, what, value, threshold) ->
+                     format("The keyspace %s has a replication factor of %s, above the %s threshold of %s.",
+                            what, value, isWarning ? "warning" : "failure", threshold));
 
     private Guardrails()
     {
@@ -540,6 +576,18 @@
     }
 
     @Override
+    public boolean getAlterTableEnabled()
+    {
+        return DEFAULT_CONFIG.getAlterTableEnabled();
+    }
+
+    @Override
+    public void setAlterTableEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setAlterTableEnabled(enabled);
+    }
+
+    @Override
     public boolean getAllowFilteringEnabled()
     {
         return DEFAULT_CONFIG.getAllowFilteringEnabled();
@@ -552,6 +600,18 @@
     }
 
     @Override
+    public boolean getSimpleStrategyEnabled()
+    {
+        return DEFAULT_CONFIG.getSimpleStrategyEnabled();
+    }
+
+    @Override
+    public void setSimpleStrategyEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setSimpleStrategyEnabled(enabled);
+    }
+
+    @Override
     public boolean getUncompressedTablesEnabled()
     {
         return DEFAULT_CONFIG.getUncompressedTablesEnabled();
@@ -600,6 +660,18 @@
     }
 
     @Override
+    public boolean getDropKeyspaceEnabled()
+    {
+        return DEFAULT_CONFIG.getDropKeyspaceEnabled();
+    }
+
+    @Override
+    public void setDropKeyspaceEnabled(boolean enabled)
+    {
+        DEFAULT_CONFIG.setDropKeyspaceEnabled(enabled);
+    }
+
+    @Override
     public int getPageSizeWarnThreshold()
     {
         return DEFAULT_CONFIG.getPageSizeWarnThreshold();
@@ -818,6 +890,24 @@
     }
 
     @Override
+    public int getMaximumReplicationFactorWarnThreshold()
+    {
+        return DEFAULT_CONFIG.getMaximumReplicationFactorWarnThreshold();
+    }
+
+    @Override
+    public int getMaximumReplicationFactorFailThreshold()
+    {
+        return DEFAULT_CONFIG.getMaximumReplicationFactorFailThreshold();
+    }
+
+    @Override
+    public void setMaximumReplicationFactorThreshold (int warn, int fail)
+    {
+        DEFAULT_CONFIG.setMaximumReplicationFactorThreshold(warn, fail);
+    }
+
+    @Override
     public int getDataDiskUsagePercentageWarnThreshold()
     {
         return DEFAULT_CONFIG.getDataDiskUsagePercentageWarnThreshold();
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
index a52eeb0..d21b899 100644
--- a/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsConfig.java
@@ -133,6 +133,13 @@
     boolean getUserTimestampsEnabled();
 
     /**
+     * Returns whether users are allowed access to the ALTER TABLE statement to mutate columns or not
+     *
+     * @return {@code true} if ALTER TABLE ADD/DROP/RENAME is allowed, {@code false} otherwise.
+     */
+    boolean getAlterTableEnabled();
+
+    /**
      * Returns whether tables can be uncompressed
      *
      * @return {@code true} if user's can disable compression, {@code false} otherwise.
@@ -161,6 +168,13 @@
     boolean getDropTruncateTableEnabled();
 
     /**
+     * Returns whether DROP on keyspaces is allowed
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getDropKeyspaceEnabled();
+
+    /**
      * @return The threshold to warn when page size exceeds given size.
      */
     int getPageSizeWarnThreshold();
@@ -185,6 +199,13 @@
     boolean getAllowFilteringEnabled();
 
     /**
+     * Returns whether setting SimpleStrategy via keyspace creation or alteration is enabled
+     *
+     * @return {@code true} if SimpleStrategy is allowed, {@code false} otherwise.
+     */
+    boolean getSimpleStrategyEnabled();
+
+    /**
      * @return The threshold to warn when an IN query creates a cartesian product with a size exceeding threshold.
      * -1 means disabled.
      */
@@ -277,4 +298,13 @@
      */
     int getMinimumReplicationFactorFailThreshold();
 
+    /**
+     * @return The threshold to warn when replication factor is greater than threshold.
+     */
+    int getMaximumReplicationFactorWarnThreshold();
+
+    /**
+     * @return The threshold to fail when replication factor is greater than threshold.
+     */
+    int getMaximumReplicationFactorFailThreshold();
 }
diff --git a/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java b/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
index ad2edda..e410d5c 100644
--- a/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
+++ b/src/java/org/apache/cassandra/db/guardrails/GuardrailsMBean.java
@@ -223,6 +223,20 @@
     void setAllowFilteringEnabled(boolean enabled);
 
     /**
+     * Returns whether SimpleStrategy is allowed on keyspace creation or alteration
+     *
+     * @return {@code true} if SimpleStrategy is allowed; {@code false} otherwise
+     */
+    boolean getSimpleStrategyEnabled();
+
+    /**
+     * Sets whether SimpleStrategy is allowed on keyspace creation or alteration
+     *
+     * @param enabled {@code true} if SimpleStrategy is allowed, {@code false} otherwise.
+     */
+    void setSimpleStrategyEnabled(boolean enabled);
+
+    /**
      * Returns whether users can disable compression on tables
      *
      * @return {@code true} if users can disable compression on a table, {@code false} otherwise.
@@ -251,6 +265,20 @@
     void setCompactTablesEnabled(boolean enabled);
 
     /**
+     * Gets whether users can use the ALTER TABLE statement to change columns
+     *
+     * @return {@code true} if ALTER TABLE is allowed, {@code false} otherwise.
+     */
+    boolean getAlterTableEnabled();
+
+    /**
+     * Sets whether users can use the ALTER TABLE statement to change columns
+     *
+     * @param enabled {@code true} if changing columns is allowed, {@code false} otherwise.
+     */
+    void setAlterTableEnabled(boolean enabled);
+
+    /**
      * Returns whether GROUP BY queries are allowed.
      *
      * @return {@code true} if allowed, {@code false} otherwise.
@@ -277,6 +305,18 @@
     void setDropTruncateTableEnabled(boolean enabled);
 
     /**
+     * Returns whether users can DROP a keyspace
+     *
+     * @return {@code true} if allowed, {@code false} otherwise.
+     */
+    boolean getDropKeyspaceEnabled();
+
+    /**
+     * Sets whether users can DROP a keyspace
+     */
+    void setDropKeyspaceEnabled(boolean enabled);
+
+    /**
      * @return The threshold to warn when requested page size greater than threshold.
      * -1 means disabled.
      */
@@ -522,21 +562,38 @@
     void setDataDiskUsageMaxDiskSize(@Nullable String size);
 
     /**
-     * @return The threshold to warn when replication factor is lesser threshold.
+     * @return The threshold to warn when replication factor is lesser than threshold.
      */
     int getMinimumReplicationFactorWarnThreshold();
 
     /**
-     * @return The threshold to fail when replication factor is lesser threshold.
+     * @return The threshold to fail when replication factor is lesser than threshold.
      */
     int getMinimumReplicationFactorFailThreshold();
 
     /**
-     * @param warn the threshold to warn when the minimum replication factor is lesser than
-     *             threshold -1 means disabled.
-     * @param fail the threshold to fail when the minimum replication factor is lesser than
-     *             threshold -1 means disabled.
+     * @param warn The threshold to warn when the minimum replication factor is lesser than threshold.
+     *             -1 means disabled.
+     * @param fail The threshold to fail when the minimum replication factor is lesser than threshold.
+     *             -1 means disabled.
      */
     void setMinimumReplicationFactorThreshold (int warn, int fail);
 
+    /**
+     * @return The threshold to warn when replication factor is greater than threshold.
+     */
+    int getMaximumReplicationFactorWarnThreshold();
+
+    /**
+     * @return The threshold to fail when replication factor is greater than threshold.
+     */
+    int getMaximumReplicationFactorFailThreshold();
+
+    /**
+     * @param warn The threshold to warn when the maximum replication factor is greater than threshold.
+     *             -1 means disabled.
+     * @param fail The threshold to fail when the maximum replication factor is greater than threshold.
+     *             -1 means disabled.
+     */
+    void setMaximumReplicationFactorThreshold (int warn, int fail);
 }
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java b/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
index b6b10d5..38af812 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractTimeUUIDType.java
@@ -24,9 +24,11 @@
 import org.apache.cassandra.cql3.Constants;
 import org.apache.cassandra.cql3.Term;
 import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.UUIDSerializer;
 import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUIDAsBytes;
@@ -44,6 +46,7 @@
         return true;
     }
 
+    @Override
     public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
     {
         // Compare for length
@@ -58,12 +61,12 @@
 
         long msb1 = accessorL.getLong(left, 0);
         long msb2 = accessorR.getLong(right, 0);
+        verifyVersion(msb1);
+        verifyVersion(msb2);
+
         msb1 = reorderTimestampBytes(msb1);
         msb2 = reorderTimestampBytes(msb2);
 
-        assert (msb1 & topbyte(0xf0L)) == topbyte(0x10L);
-        assert (msb2 & topbyte(0xf0L)) == topbyte(0x10L);
-
         int c = Long.compare(msb1, msb2);
         if (c != 0)
             return c;
@@ -75,6 +78,40 @@
         return Long.compare(lsb1, lsb2);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        long hiBits = accessor.getLong(data, 0);
+        verifyVersion(hiBits);
+        ByteBuffer swizzled = ByteBuffer.allocate(16);
+        swizzled.putLong(0, TimeUUIDType.reorderTimestampBytes(hiBits));
+        swizzled.putLong(8, accessor.getLong(data, 8) ^ 0x8080808080808080L);
+
+        return ByteSource.fixedLength(swizzled);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        // Optional-style encoding of empty values as null sources
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        // The non-lexical UUID bits are stored as an unsigned fixed-length 128-bit integer.
+        long hiBits = ByteSourceInverse.getUnsignedFixedLengthAsLong(comparableBytes, 8);
+        long loBits = ByteSourceInverse.getUnsignedFixedLengthAsLong(comparableBytes, 8);
+
+        hiBits = reorderBackTimestampBytes(hiBits);
+        verifyVersion(hiBits);
+        // In addition, TimeUUIDType also touches the low bits of the UUID (see CASSANDRA-8730 and DB-1758).
+        loBits ^= 0x8080808080808080L;
+
+        return UUIDType.makeUuidBytes(accessor, hiBits, loBits);
+    }
+
     // takes as input 8 signed bytes in native machine order
     // returns the first byte unchanged, and the following 7 bytes converted to an unsigned representation
     // which is the same as a 2's complement long in native format
@@ -83,16 +120,30 @@
         return signedBytes ^ 0x0080808080808080L;
     }
 
-    private static long topbyte(long topbyte)
+    private void verifyVersion(long hiBits)
     {
-        return topbyte << 56;
+        long version = (hiBits >>> 12) & 0xF;
+        if (version != 1)
+            throw new MarshalException(String.format("Invalid UUID version %d for timeuuid",
+                                                     version));
     }
 
     protected static long reorderTimestampBytes(long input)
     {
-        return    (input <<  48)
-                  | ((input <<  16) & 0xFFFF00000000L)
-                  |  (input >>> 32);
+        return (input <<  48)
+               | ((input <<  16) & 0xFFFF00000000L)
+               |  (input >>> 32);
+    }
+
+    protected static long reorderBackTimestampBytes(long input)
+    {
+        // In a time-based UUID the high bits are significantly more shuffled than in other UUIDs - if [X] represents a
+        // 16-bit tuple, [1][2][3][4] should become [3][4][2][1].
+        // See the UUID Javadoc (and more specifically the high bits layout of a Leach-Salz UUID) to understand the
+        // reasoning behind this bit twiddling in the first place (in the context of comparisons).
+        return (input << 32)
+               | ((input >>> 16) & 0xFFFF0000L)
+               | (input >>> 48);
     }
 
     public ByteBuffer fromString(String source) throws MarshalException
diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractType.java b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
index 74d4006..8f54cb6 100644
--- a/src/java/org/apache/cassandra/db/marshal/AbstractType.java
+++ b/src/java/org/apache/cassandra/db/marshal/AbstractType.java
@@ -40,6 +40,9 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.github.jamm.Unmetered;
 
 import static org.apache.cassandra.db.marshal.AbstractType.ComparisonType.CUSTOM;
@@ -55,6 +58,8 @@
 @Unmetered
 public abstract class AbstractType<T> implements Comparator<ByteBuffer>, AssignmentTestable
 {
+    private final static int VARIABLE_LENGTH = -1;
+
     public final Comparator<ByteBuffer> reverseComparator;
 
     public enum ComparisonType
@@ -449,11 +454,28 @@
     }
 
     /**
-     * The length of values for this type if all values are of fixed length, -1 otherwise.
+     * The length of values for this type if all values are of fixed length, -1 otherwise. This has an impact on
+     * serialization.
+     * <lu>
+     *  <li> see {@link #writeValue} </li>
+     *  <li> see {@link #read} </li>
+     *  <li> see {@link #writtenLength} </li>
+     *  <li> see {@link #skipValue} </li>
+     * </lu>
      */
     public int valueLengthIfFixed()
     {
-        return -1;
+        return VARIABLE_LENGTH;
+    }
+
+    /**
+     * Checks if all values are of fixed length.
+     *
+     * @return {@code true} if all values are of fixed length, {@code false} otherwise.
+     */
+    public final boolean isValueLengthFixed()
+    {
+        return valueLengthIfFixed() != VARIABLE_LENGTH;
     }
 
     // This assumes that no empty values are passed
@@ -599,6 +621,69 @@
     }
 
     /**
+     * Produce a byte-comparable representation of the given value, i.e. a sequence of bytes that compares the same way
+     * using lexicographical unsigned byte comparison as the original value using the type's comparator.
+     *
+     * We use a slightly stronger requirement to be able to use the types in tuples. Precisely, for any pair x, y of
+     * non-equal valid values of this type and any bytes b1, b2 between 0x10 and 0xEF,
+     * (+ stands for concatenation)
+     *   compare(x, y) == compareLexicographicallyUnsigned(asByteComparable(x)+b1, asByteComparable(y)+b2)
+     * (i.e. the values compare like the original type, and an added 0x10-0xEF byte at the end does not change that) and:
+     *   asByteComparable(x)+b1 is not a prefix of asByteComparable(y)      (weakly prefix free)
+     * (i.e. a valid representation of a value may be a prefix of another valid representation of a value only if the
+     * following byte in the latter is smaller than 0x10 or larger than 0xEF). These properties are trivially true if
+     * the encoding compares correctly and is prefix free, but also permits a little more freedom that enables somewhat
+     * more efficient encoding of arbitrary-length byte-comparable blobs.
+     *
+     * Depending on the type, this method can be called for null or empty input, in which case the output is allowed to
+     * be null (the clustering/tuple encoding will accept and handle it).
+     */
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V value, ByteComparable.Version version)
+    {
+        if (isByteOrderComparable)
+        {
+            // When a type is byte-ordered on its own, we only need to escape it, so that we can include it in
+            // multi-component types and make the encoding weakly-prefix-free.
+            return ByteSource.of(accessor, value, version);
+        }
+        else
+            // default is only good for byte-comparables
+            throw new UnsupportedOperationException(getClass().getSimpleName() + " does not implement asComparableBytes");
+    }
+
+    public final ByteSource asComparableBytes(ByteBuffer byteBuffer, ByteComparable.Version version)
+    {
+        return asComparableBytes(ByteBufferAccessor.instance, byteBuffer, version);
+    }
+
+    /**
+     * Translates the given byte-ordered representation to the common, non-byte-ordered binary representation of a
+     * payload for this abstract type (the latter, common binary representation is what we mostly work with in the
+     * storage engine internals). If the given bytes don't correspond to the encoding of some payload value for this
+     * abstract type, an {@link IllegalArgumentException} may be thrown.
+     *
+     * @param accessor value accessor used to construct the value.
+     * @param comparableBytes A byte-ordered representation (presumably of a payload for this abstract type).
+     * @param version The byte-comparable version used to construct the representation.
+     * @return A of a payload for this abstract type, corresponding to the given byte-ordered representation,
+     *         constructed using the supplied value accessor.
+     *
+     * @see #asComparableBytes
+     */
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        if (isByteOrderComparable)
+            return accessor.valueOf(ByteSourceInverse.getUnescapedBytes(comparableBytes));
+        else
+            throw new UnsupportedOperationException(getClass().getSimpleName() + " does not implement fromComparableBytes");
+    }
+
+    public final ByteBuffer fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return fromComparableBytes(ByteBufferAccessor.instance, comparableBytes, version);
+    }
+
+    /**
      * This must be overriden by subclasses if necessary so that for any
      * AbstractType, this == TypeParser.parse(toString()).
      *
diff --git a/src/java/org/apache/cassandra/db/marshal/BooleanType.java b/src/java/org/apache/cassandra/db/marshal/BooleanType.java
index 4ef5f95..d144f4e 100644
--- a/src/java/org/apache/cassandra/db/marshal/BooleanType.java
+++ b/src/java/org/apache/cassandra/db/marshal/BooleanType.java
@@ -26,14 +26,11 @@
 import org.apache.cassandra.serializers.BooleanSerializer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class BooleanType extends AbstractType<Boolean>
 {
-    private static final Logger logger = LoggerFactory.getLogger(BooleanType.class);
-
     public static final BooleanType instance = new BooleanType();
 
     BooleanType() {super(ComparisonType.CUSTOM);} // singleton
@@ -54,6 +51,26 @@
         return v1 - v2;
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+        byte b = accessor.toByte(data);
+        if (b != 0)
+            b = 1;
+        return ByteSource.oneByte(b);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        if (comparableBytes == null)
+            return accessor.empty();
+        int b = comparableBytes.next();
+        return accessor.valueOf(b == 1);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
 
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java b/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
index df24a62..d710899 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteArrayAccessor.java
@@ -249,6 +249,13 @@
     }
 
     @Override
+    public int putByte(byte[] dst, int offset, byte value)
+    {
+        dst[offset] = value;
+        return TypeSizes.BYTE_SIZE;
+    }
+
+    @Override
     public int putShort(byte[] dst, int offset, short value)
     {
         ByteArrayUtil.putShort(dst, offset, value);
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java b/src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java
index ea9bf11..9b477ae 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteArrayObjectFactory.java
@@ -18,6 +18,7 @@
 
 package org.apache.cassandra.db.marshal;
 
+import org.apache.cassandra.db.AbstractArrayClusteringPrefix;
 import org.apache.cassandra.db.ArrayClustering;
 import org.apache.cassandra.db.ArrayClusteringBound;
 import org.apache.cassandra.db.ArrayClusteringBoundary;
@@ -33,7 +34,7 @@
 
 class ByteArrayObjectFactory implements ValueAccessor.ObjectFactory<byte[]>
 {
-    private static final Clustering<byte[]> EMPTY_CLUSTERING = new ArrayClustering()
+    private static final Clustering<byte[]> EMPTY_CLUSTERING = new ArrayClustering(AbstractArrayClusteringPrefix.EMPTY_VALUES_ARRAY)
     {
         public String toString(TableMetadata metadata)
         {
@@ -41,14 +42,37 @@
         }
     };
 
+    public static final Clustering<byte[]> STATIC_CLUSTERING = new ArrayClustering(AbstractArrayClusteringPrefix.EMPTY_VALUES_ARRAY)
+    {
+        @Override
+        public Kind kind()
+        {
+            return Kind.STATIC_CLUSTERING;
+        }
+
+        @Override
+        public String toString()
+        {
+            return "STATIC";
+        }
+
+        @Override
+        public String toString(TableMetadata metadata)
+        {
+            return toString();
+        }
+    };
+
     static final ValueAccessor.ObjectFactory<byte[]> instance = new ByteArrayObjectFactory();
 
     private ByteArrayObjectFactory() {}
 
     /** The smallest start bound, i.e. the one that starts before any row. */
-    private static final ArrayClusteringBound BOTTOM_BOUND = new ArrayClusteringBound(ClusteringPrefix.Kind.INCL_START_BOUND, new byte[0][]);
+    private static final ArrayClusteringBound BOTTOM_BOUND = new ArrayClusteringBound(ClusteringPrefix.Kind.INCL_START_BOUND,
+                                                                                      AbstractArrayClusteringPrefix.EMPTY_VALUES_ARRAY);
     /** The biggest end bound, i.e. the one that ends after any row. */
-    private static final ArrayClusteringBound TOP_BOUND = new ArrayClusteringBound(ClusteringPrefix.Kind.INCL_END_BOUND, new byte[0][]);
+    private static final ArrayClusteringBound TOP_BOUND = new ArrayClusteringBound(ClusteringPrefix.Kind.INCL_END_BOUND,
+                                                                                   AbstractArrayClusteringPrefix.EMPTY_VALUES_ARRAY);
 
     public Cell<byte[]> cell(ColumnMetadata column, long timestamp, int ttl, int localDeletionTime, byte[] value, CellPath path)
     {
@@ -65,6 +89,11 @@
         return EMPTY_CLUSTERING;
     }
 
+    public Clustering<byte[]> staticClustering()
+    {
+        return STATIC_CLUSTERING;
+    }
+
     public ClusteringBound<byte[]> bound(ClusteringPrefix.Kind kind, byte[]... values)
     {
         return new ArrayClusteringBound(kind, values);
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java b/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
index 40a3bf4..0712930 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteBufferAccessor.java
@@ -253,6 +253,13 @@
     }
 
     @Override
+    public int putByte(ByteBuffer dst, int offset, byte value)
+    {
+        dst.put(dst.position() + offset, value);
+        return TypeSizes.BYTE_SIZE;
+    }
+
+    @Override
     public int putShort(ByteBuffer dst, int offset, short value)
     {
         dst.putShort(dst.position() + offset, value);
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java b/src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java
index 00f4646..0ac3db9 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteBufferObjectFactory.java
@@ -20,6 +20,7 @@
 
 import java.nio.ByteBuffer;
 
+import org.apache.cassandra.db.AbstractBufferClusteringPrefix;
 import org.apache.cassandra.db.BufferClustering;
 import org.apache.cassandra.db.BufferClusteringBound;
 import org.apache.cassandra.db.BufferClusteringBoundary;
@@ -31,24 +32,15 @@
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.db.rows.CellPath;
 import org.apache.cassandra.schema.ColumnMetadata;
-import org.apache.cassandra.schema.TableMetadata;
 
 class ByteBufferObjectFactory implements ValueAccessor.ObjectFactory<ByteBuffer>
 {
-    /** Empty clustering for tables having no clustering columns. */
-    private static final Clustering<ByteBuffer> EMPTY_CLUSTERING = new BufferClustering()
-    {
-        @Override
-        public String toString(TableMetadata metadata)
-        {
-            return "EMPTY";
-        }
-    };
-
     /** The smallest start bound, i.e. the one that starts before any row. */
-    private static final BufferClusteringBound BOTTOM_BOUND = new BufferClusteringBound(ClusteringPrefix.Kind.INCL_START_BOUND, new ByteBuffer[0]);
+    private static final BufferClusteringBound BOTTOM_BOUND = new BufferClusteringBound(ClusteringPrefix.Kind.INCL_START_BOUND,
+                                                                                        AbstractBufferClusteringPrefix.EMPTY_VALUES_ARRAY);
     /** The biggest end bound, i.e. the one that ends after any row. */
-    private static final BufferClusteringBound TOP_BOUND = new BufferClusteringBound(ClusteringPrefix.Kind.INCL_END_BOUND, new ByteBuffer[0]);
+    private static final BufferClusteringBound TOP_BOUND = new BufferClusteringBound(ClusteringPrefix.Kind.INCL_END_BOUND,
+                                                                                     AbstractBufferClusteringPrefix.EMPTY_VALUES_ARRAY);
 
     static final ValueAccessor.ObjectFactory<ByteBuffer> instance = new ByteBufferObjectFactory();
 
@@ -66,7 +58,12 @@
 
     public Clustering<ByteBuffer> clustering()
     {
-        return EMPTY_CLUSTERING;
+        return Clustering.EMPTY;
+    }
+
+    public Clustering<ByteBuffer> staticClustering()
+    {
+        return Clustering.STATIC_CLUSTERING;
     }
 
     public ClusteringBound<ByteBuffer> bound(ClusteringPrefix.Kind kind, ByteBuffer... values)
diff --git a/src/java/org/apache/cassandra/db/marshal/ByteType.java b/src/java/org/apache/cassandra/db/marshal/ByteType.java
index f94f4bb..a910fbb 100644
--- a/src/java/org/apache/cassandra/db/marshal/ByteType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ByteType.java
@@ -27,6 +27,10 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class ByteType extends NumberType<Byte>
 {
@@ -42,6 +46,19 @@
         return accessorL.getByte(left, 0) - accessorR.getByte(right, 0);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        // This type does not allow non-present values, but we do just to avoid future complexity.
+        return ByteSource.optionalSignedFixedLengthNumber(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLength(accessor, comparableBytes, 1);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/CollectionType.java b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
index c52cddc..5e9916e 100644
--- a/src/java/org/apache/cassandra/db/marshal/CollectionType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CollectionType.java
@@ -19,6 +19,7 @@
 
 import java.nio.ByteBuffer;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Iterator;
 
@@ -27,6 +28,7 @@
 import org.apache.cassandra.cql3.Lists;
 import org.apache.cassandra.cql3.Maps;
 import org.apache.cassandra.cql3.Sets;
+import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.db.rows.CellPath;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -35,6 +37,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 /**
  * The abstract validator that is the base for maps, sets and lists (both frozen and non-frozen).
@@ -245,6 +250,91 @@
         return this.toString(false);
     }
 
+    static <VL, VR> int compareListOrSet(AbstractType<?> elementsComparator, VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
+    {
+        // Note that this is only used if the collection is frozen
+        if (accessorL.isEmpty(left) || accessorR.isEmpty(right))
+            return Boolean.compare(accessorR.isEmpty(right), accessorL.isEmpty(left));
+
+        int sizeL = CollectionSerializer.readCollectionSize(left, accessorL, ProtocolVersion.V3);
+        int offsetL = CollectionSerializer.sizeOfCollectionSize(sizeL, ProtocolVersion.V3);
+        int sizeR = CollectionSerializer.readCollectionSize(right, accessorR, ProtocolVersion.V3);
+        int offsetR = TypeSizes.INT_SIZE;
+
+        for (int i = 0; i < Math.min(sizeL, sizeR); i++)
+        {
+            VL v1 = CollectionSerializer.readValue(left, accessorL, offsetL, ProtocolVersion.V3);
+            offsetL += CollectionSerializer.sizeOfValue(v1, accessorL, ProtocolVersion.V3);
+            VR v2 = CollectionSerializer.readValue(right, accessorR, offsetR, ProtocolVersion.V3);
+            offsetR += CollectionSerializer.sizeOfValue(v2, accessorR, ProtocolVersion.V3);
+            int cmp = elementsComparator.compare(v1, accessorL, v2, accessorR);
+            if (cmp != 0)
+                return cmp;
+        }
+
+        return Integer.compare(sizeL, sizeR);
+    }
+
+    static <V> ByteSource asComparableBytesListOrSet(AbstractType<?> elementsComparator,
+                                                     ValueAccessor<V> accessor,
+                                                     V data,
+                                                     ByteComparable.Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        int offset = 0;
+        int size = CollectionSerializer.readCollectionSize(data, accessor, ProtocolVersion.V3);
+        offset += CollectionSerializer.sizeOfCollectionSize(size, ProtocolVersion.V3);
+        ByteSource[] srcs = new ByteSource[size];
+        for (int i = 0; i < size; ++i)
+        {
+            V v = CollectionSerializer.readValue(data, accessor, offset, ProtocolVersion.V3);
+            offset += CollectionSerializer.sizeOfValue(v, accessor, ProtocolVersion.V3);
+            srcs[i] = elementsComparator.asComparableBytes(accessor, v, version);
+        }
+        return ByteSource.withTerminatorMaybeLegacy(version, 0x00, srcs);
+    }
+
+    static <V> V fromComparableBytesListOrSet(ValueAccessor<V> accessor,
+                                              ByteSource.Peekable comparableBytes,
+                                              ByteComparable.Version version,
+                                              AbstractType<?> elementType)
+    {
+        if (comparableBytes == null)
+            return accessor.empty();
+        assert version != ByteComparable.Version.LEGACY; // legacy translation is not reversible
+
+        List<V> buffers = new ArrayList<>();
+        int separator = comparableBytes.next();
+        while (separator != ByteSource.TERMINATOR)
+        {
+            if (!ByteSourceInverse.nextComponentNull(separator))
+                buffers.add(elementType.fromComparableBytes(accessor, comparableBytes, version));
+            else
+                buffers.add(null);
+            separator = comparableBytes.next();
+        }
+        return CollectionSerializer.pack(buffers, accessor, buffers.size(), ProtocolVersion.V3);
+    }
+
+    public static String setOrListToJsonString(ByteBuffer buffer, AbstractType elementsType, ProtocolVersion protocolVersion)
+    {
+        ByteBuffer value = buffer.duplicate();
+        StringBuilder sb = new StringBuilder("[");
+        int size = CollectionSerializer.readCollectionSize(value, ByteBufferAccessor.instance, protocolVersion);
+        int offset = CollectionSerializer.sizeOfCollectionSize(size, protocolVersion);
+        for (int i = 0; i < size; i++)
+        {
+            if (i > 0)
+                sb.append(", ");
+            ByteBuffer element = CollectionSerializer.readValue(value, ByteBufferAccessor.instance, offset, protocolVersion);
+            offset += CollectionSerializer.sizeOfValue(element, ByteBufferAccessor.instance, protocolVersion);
+            sb.append(elementsType.toJSONString(element, protocolVersion));
+        }
+        return sb.append("]").toString();
+    }
+
     private static class CollectionPathSerializer implements CellPath.Serializer
     {
         public void serialize(CellPath path, DataOutputPlus out) throws IOException
diff --git a/src/java/org/apache/cassandra/db/marshal/CompositeType.java b/src/java/org/apache/cassandra/db/marshal/CompositeType.java
index bf5e914a..00cbeb5 100644
--- a/src/java/org/apache/cassandra/db/marshal/CompositeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/CompositeType.java
@@ -24,6 +24,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
@@ -31,6 +32,9 @@
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
@@ -165,6 +169,86 @@
         return types.get(i);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        if (data == null || accessor.isEmpty(data))
+            return null;
+
+        ByteSource[] srcs = new ByteSource[types.size() * 2 + 1];
+        int length = accessor.size(data);
+
+        // statics go first
+        boolean isStatic = readIsStaticInternal(data, accessor);
+        int offset = startingOffsetInternal(isStatic);
+        srcs[0] = isStatic ? null : ByteSource.EMPTY;
+
+        int i = 0;
+        byte lastEoc = 0;
+        while (offset < length)
+        {
+            // Only the end-of-component byte of the last component of this composite can be non-zero, so the
+            // component before can't have a non-zero end-of-component byte.
+            assert lastEoc == 0 : lastEoc;
+
+            int componentLength = accessor.getUnsignedShort(data, offset);
+            offset += 2;
+            srcs[i * 2 + 1] = types.get(i).asComparableBytes(accessor, accessor.slice(data, offset, componentLength), version);
+            offset += componentLength;
+            lastEoc = accessor.getByte(data, offset);
+            offset += 1;
+            srcs[i * 2 + 2] = ByteSource.oneByte(lastEoc & 0xFF ^ 0x80); // end-of-component also takes part in comparison as signed byte
+            ++i;
+        }
+        // A composite may be leaving some values unspecified. If this is the case, make sure we terminate early
+        // so that translations created before an extra field was added match translations that have the field but don't
+        // specify a value for it.
+        if (i * 2 + 1 < srcs.length)
+            srcs = Arrays.copyOfRange(srcs, 0, i * 2 + 1);
+
+        return ByteSource.withTerminatorMaybeLegacy(version, ByteSource.END_OF_STREAM, srcs);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, Version version)
+    {
+        // For ByteComparable.Version.LEGACY the terminator byte is ByteSource.END_OF_STREAM. The latter means that it's
+        // indistinguishable from the END_OF_STREAM byte that gets returned _after_ the terminator byte has already
+        // been consumed, when the composite is part of a multi-component sequence. So if in such a scenario we consume
+        // the ByteSource.END_OF_STREAM terminator here, this will result in actually consuming the multi-component
+        // sequence separator after it and jumping directly into the bytes of the next component, when we try to
+        // consume the (already consumed) separator.
+        // Instead of trying to find a way around the situation, we can just take advantage of the fact that we don't
+        // need to decode from Version.LEGACY, assume that we never do that, and assert it here.
+        assert version != Version.LEGACY;
+
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        int separator = comparableBytes.next();
+        boolean isStatic = ByteSourceInverse.nextComponentNull(separator);
+        int i = 0;
+        V[] buffers = accessor.createArray(types.size());
+        byte lastEoc = 0;
+
+        while ((separator = comparableBytes.next()) != ByteSource.TERMINATOR && i < types.size())
+        {
+            // Only the end-of-component byte of the last component of this composite can be non-zero, so the
+            // component before can't have a non-zero end-of-component byte.
+            assert lastEoc == 0 : lastEoc;
+
+            // Get the next type and decode its payload.
+            AbstractType<?> type = types.get(i);
+            V decoded = type.fromComparableBytes(accessor,
+                                                 ByteSourceInverse.nextComponentSource(comparableBytes, separator),
+                                                 version);
+            buffers[i++] = decoded;
+
+            lastEoc = ByteSourceInverse.getSignedByte(ByteSourceInverse.nextComponentSource(comparableBytes));
+        }
+        return build(accessor, isStatic, Arrays.copyOf(buffers, i), lastEoc);
+    }
+
     protected ParsedComparator parseComparator(int i, String part)
     {
         return new StaticParsedComparator(types.get(i), part);
@@ -371,6 +455,12 @@
     @SafeVarargs
     public static <V> V build(ValueAccessor<V> accessor, boolean isStatic, V... values)
     {
+        return build(accessor, isStatic, values, (byte) 0);
+    }
+
+    @VisibleForTesting
+    public static <V> V build(ValueAccessor<V> accessor, boolean isStatic, V[] values, byte lastEoc)
+    {
         int totalLength = isStatic ? 2 : 0;
         for (V v : values)
             totalLength += 2 + accessor.size(v) + 1;
@@ -380,11 +470,12 @@
         if (isStatic)
             out.putShort((short)STATIC_MARKER);
 
-        for (V v : values)
+        for (int i = 0; i < values.length; ++i)
         {
+            V v = values[i];
             ByteBufferUtil.writeShortLength(out, accessor.size(v));
             accessor.write(v, out);
-            out.put((byte) 0);
+            out.put(i != values.length - 1 ? (byte) 0 : lastEoc);
         }
         out.flip();
         return accessor.valueOf(out);
diff --git a/src/java/org/apache/cassandra/db/marshal/DateType.java b/src/java/org/apache/cassandra/db/marshal/DateType.java
index 473cedf..595106d 100644
--- a/src/java/org/apache/cassandra/db/marshal/DateType.java
+++ b/src/java/org/apache/cassandra/db/marshal/DateType.java
@@ -31,6 +31,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 /**
  * This is the old version of TimestampType, but has been replaced as it wasn't comparing pre-epoch timestamps
@@ -50,6 +53,19 @@
         return true;
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        // While BYTE_ORDER would still work for this type, making use of the fixed length is more efficient.
+        return ByteSource.optionalFixedLength(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalFixedLength(accessor, comparableBytes, 8);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
       // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/DecimalType.java b/src/java/org/apache/cassandra/db/marshal/DecimalType.java
index 5740fdc..3e02dc9 100644
--- a/src/java/org/apache/cassandra/db/marshal/DecimalType.java
+++ b/src/java/org/apache/cassandra/db/marshal/DecimalType.java
@@ -24,6 +24,8 @@
 import java.nio.ByteBuffer;
 import java.util.Objects;
 
+import com.google.common.primitives.Ints;
+
 import org.apache.cassandra.cql3.CQL3Type;
 import org.apache.cassandra.cql3.Constants;
 import org.apache.cassandra.cql3.Term;
@@ -32,6 +34,8 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class DecimalType extends NumberType<BigDecimal>
 {
@@ -41,6 +45,16 @@
     private static final int MAX_SCALE = 1000;
     private static final MathContext MAX_PRECISION = new MathContext(10000);
 
+    // Constants or escaping values needed to encode/decode variable-length floating point numbers (decimals) in our
+    // custom byte-ordered encoding scheme.
+    private static final int POSITIVE_DECIMAL_HEADER_MASK = 0x80;
+    private static final int NEGATIVE_DECIMAL_HEADER_MASK = 0x00;
+    private static final int DECIMAL_EXPONENT_LENGTH_HEADER_MASK = 0x40;
+    private static final byte DECIMAL_LAST_BYTE = (byte) 0x00;
+    private static final BigInteger HUNDRED = BigInteger.valueOf(100);
+
+    private static final ByteBuffer ZERO_BUFFER = instance.decompose(BigDecimal.ZERO);
+
     DecimalType() {super(ComparisonType.CUSTOM);} // singleton
 
     public boolean isEmptyValueMeaningless()
@@ -59,6 +73,196 @@
         return compareComposed(left, accessorL, right, accessorR, this);
     }
 
+    /**
+     * Constructs a byte-comparable representation.
+     * This is rather difficult and involves reconstructing the decimal.
+     *
+     * To compare, we need a normalized value, i.e. one with a sign, exponent and (0,1) mantissa. To avoid
+     * loss of precision, both exponent and mantissa need to be base-100.  We can't get this directly off the serialized
+     * bytes, as they have base-10 scale and base-256 unscaled part.
+     *
+     * We store:
+     *     - sign bit inverted * 0x80 + 0x40 + signed exponent length, where exponent is negated if value is negative
+     *     - zero or more exponent bytes (as given by length)
+     *     - 0x80 + first pair of decimal digits, negative if value is negative, rounded to -inf
+     *     - zero or more 0x80 + pair of decimal digits, always positive
+     *     - trailing 0x00
+     * Zero is special-cased as 0x80.
+     *
+     * Because the trailing 00 cannot be produced from a pair of decimal digits (positive or not), no value can be
+     * a prefix of another.
+     *
+     * Encoding examples:
+     *    1.1    as       c1 = 0x80 (positive number) + 0x40 + (positive exponent) 0x01 (exp length 1)
+     *                    01 = exponent 1 (100^1)
+     *                    81 = 0x80 + 01 (0.01)
+     *                    8a = 0x80 + 10 (....10)   0.0110e2
+     *                    00
+     *    -1     as       3f = 0x00 (negative number) + 0x40 - (negative exponent) 0x01 (exp length 1)
+     *                    ff = exponent -1. negative number, thus 100^1
+     *                    7f = 0x80 - 01 (-0.01)    -0.01e2
+     *                    00
+     *    -99.9  as       3f = 0x00 (negative number) + 0x40 - (negative exponent) 0x01 (exp length 1)
+     *                    ff = exponent -1. negative number, thus 100^1
+     *                    1c = 0x80 - 100 (-1.00)
+     *                    8a = 0x80 + 10  (+....10) -0.999e2
+     *                    00
+     *
+     */
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        BigDecimal value = compose(data, accessor);
+        if (value == null)
+            return null;
+        if (value.compareTo(BigDecimal.ZERO) == 0)  // Note: 0.equals(0.0) returns false!
+            return ByteSource.oneByte(POSITIVE_DECIMAL_HEADER_MASK);
+
+        long scale = (((long) value.scale()) - value.precision()) & ~1;
+        boolean negative = value.signum() < 0;
+        // Make a base-100 exponent (this will always fit in an int).
+        int exponent = Math.toIntExact(-scale >> 1);
+        // Flip the exponent sign for negative numbers, so that ones with larger magnitudes are properly treated as smaller.
+        final int modulatedExponent = negative ? -exponent : exponent;
+        // We should never have scale > Integer.MAX_VALUE, as we're always subtracting the non-negative precision of
+        // the encoded BigDecimal, and furthermore we're rounding to negative infinity.
+        assert scale <= Integer.MAX_VALUE;
+        // However, we may end up overflowing on the negative side.
+        if (scale < Integer.MIN_VALUE)
+        {
+            // As scaleByPowerOfTen needs an int scale, do the scaling in two steps.
+            int mv = Integer.MIN_VALUE;
+            value = value.scaleByPowerOfTen(mv);
+            scale -= mv;
+        }
+        final BigDecimal mantissa = value.scaleByPowerOfTen(Ints.checkedCast(scale)).stripTrailingZeros();
+        // We now have a smaller-than-one signed mantissa, and a signed and modulated base-100 exponent.
+        assert mantissa.abs().compareTo(BigDecimal.ONE) < 0;
+
+        return new ByteSource()
+        {
+            // Start with up to 5 bytes for sign + exponent.
+            int exponentBytesLeft = 5;
+            BigDecimal current = mantissa;
+
+            @Override
+            public int next()
+            {
+                if (exponentBytesLeft > 0)
+                {
+                    --exponentBytesLeft;
+                    if (exponentBytesLeft == 4)
+                    {
+                        // Skip leading zero bytes in the modulatedExponent.
+                        exponentBytesLeft -= Integer.numberOfLeadingZeros(Math.abs(modulatedExponent)) / 8;
+                        // Now prepare the leading byte which includes the sign of the number plus the sign and length of the modulatedExponent.
+                        int explen = DECIMAL_EXPONENT_LENGTH_HEADER_MASK + (modulatedExponent < 0 ? -exponentBytesLeft : exponentBytesLeft);
+                        return explen + (negative ? NEGATIVE_DECIMAL_HEADER_MASK : POSITIVE_DECIMAL_HEADER_MASK);
+                    }
+                    else
+                        return (modulatedExponent >> (exponentBytesLeft * 8)) & 0xFF;
+                }
+                else if (current == null)
+                {
+                    return END_OF_STREAM;
+                }
+                else if (current.compareTo(BigDecimal.ZERO) == 0)
+                {
+                    current = null;
+                    return 0x00;
+                }
+                else
+                {
+                    BigDecimal v = current.scaleByPowerOfTen(2);
+                    BigDecimal floor = v.setScale(0, RoundingMode.FLOOR);
+                    current = v.subtract(floor);
+                    return floor.byteValueExact() + 0x80;
+                }
+            }
+        };
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        int headerBits = comparableBytes.next();
+        if (headerBits == POSITIVE_DECIMAL_HEADER_MASK)
+            return accessor.valueOf(ZERO_BUFFER);
+
+        // I. Extract the exponent.
+        // The sign of the decimal, and the sign and the length (in bytes) of the decimal exponent, are all encoded in
+        // the first byte.
+        // Get the sign of the decimal...
+        boolean isNegative = headerBits < POSITIVE_DECIMAL_HEADER_MASK;
+        headerBits -= isNegative ? NEGATIVE_DECIMAL_HEADER_MASK : POSITIVE_DECIMAL_HEADER_MASK;
+        headerBits -= DECIMAL_EXPONENT_LENGTH_HEADER_MASK;
+        // Get the sign and the length of the exponent (the latter is encoded as its negative if the sign of the
+        // exponent is negative)...
+        boolean isExponentNegative = headerBits < 0;
+        headerBits = isExponentNegative ? -headerBits : headerBits;
+        // Now consume the exponent bytes. If the exponent is negative and uses less than 4 bytes, the remaining bytes
+        // should be padded with 1s, in order for the constructed int to contain the correct (negative) exponent value.
+        // So, if the exponent is negative, we can just start with all bits set to 1 (i.e. we can start with -1).
+        int exponent = isExponentNegative ? -1 : 0;
+        for (int i = 0; i < headerBits; ++i)
+            exponent = (exponent << 8) | comparableBytes.next();
+        // The encoded exponent also contains the decimal sign, in order to correctly compare exponents in case of
+        // negative decimals (e.g. x * 10^y > x * 10^z if x < 0 && y < z). After the decimal sign is "removed", what's
+        // left is a base-100 exponent following BigDecimal's convention for the exponent sign.
+        exponent = isNegative ? -exponent : exponent;
+
+        // II. Extract the mantissa as a BigInteger value. It was encoded as a BigDecimal value between 0 and 1, in
+        // order to be used for comparison (after the sign of the decimal and the sign and the value of the exponent),
+        // but when decoding we don't need that property on the transient mantissa value.
+        BigInteger mantissa = BigInteger.ZERO;
+        int curr = comparableBytes.next();
+        while (curr != DECIMAL_LAST_BYTE)
+        {
+            // The mantissa value is constructed by a standard positional notation value calculation.
+            // The value of the next digit is the next most-significant mantissa byte as an unsigned integer,
+            // offset by a predetermined value (in this case, 0x80)...
+            int currModified = curr - 0x80;
+            // ...multiply the current value by the base (in this case, 100)...
+            mantissa = mantissa.multiply(HUNDRED);
+            // ...then add the next digit to the modified current value...
+            mantissa = mantissa.add(BigInteger.valueOf(currModified));
+            // ...and finally, adjust the base-100, BigDecimal format exponent accordingly.
+            --exponent;
+            curr = comparableBytes.next();
+        }
+
+        // III. Construct the final BigDecimal value, by combining the mantissa and the exponent, guarding against
+        // underflow or overflow when exponents are close to their boundary values.
+        long base10NonBigDecimalFormatExp = 2L * exponent;
+        // When expressing a sufficiently big decimal, BigDecimal's internal scale value will be negative with very
+        // big absolute value. To compute the encoded exponent, this internal scale has the number of digits of the
+        // unscaled value subtracted from it, after which it's divided by 2, rounding down to negative infinity
+        // (before accounting for the decimal sign). When decoding, this exponent is converted to a base-10 exponent in
+        // non-BigDecimal format, which means that it can very well overflow Integer.MAX_VALUE.
+        // For example, see how <code>new BigDecimal(BigInteger.TEN, Integer.MIN_VALUE)</code> is encoded and decoded.
+        if (base10NonBigDecimalFormatExp > Integer.MAX_VALUE)
+        {
+            // If the base-10 exponent will result in an overflow, some of its powers of 10 need to be absorbed by the
+            // mantissa. How much exactly? As little as needed, in order to avoid complex BigInteger operations, which
+            // means exactly as much as to have a scale of -Integer.MAX_VALUE.
+            int exponentReduction = (int) (base10NonBigDecimalFormatExp - Integer.MAX_VALUE);
+            mantissa = mantissa.multiply(BigInteger.TEN.pow(exponentReduction));
+            base10NonBigDecimalFormatExp = Integer.MAX_VALUE;
+        }
+        assert base10NonBigDecimalFormatExp >= Integer.MIN_VALUE && base10NonBigDecimalFormatExp <= Integer.MAX_VALUE;
+        // Here we negate the exponent because we are not using BigDecimal.scaleByPowerOfTen (where a positive number
+        // means "multiplying by a positive power of 10"), but rather BigDecimal's internal scale representation
+        // (where a positive number means "dividing by a positive power of 10").
+        byte[] mantissaBytes = mantissa.toByteArray();
+        V resultBuf = accessor.allocate(4 + mantissaBytes.length);
+        accessor.putInt(resultBuf, 0, (int) -base10NonBigDecimalFormatExp);
+        accessor.copyByteArrayTo(mantissaBytes, 0, resultBuf, 4, mantissaBytes.length);
+        return resultBuf;
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/DoubleType.java b/src/java/org/apache/cassandra/db/marshal/DoubleType.java
index 570d420..56ae013 100644
--- a/src/java/org/apache/cassandra/db/marshal/DoubleType.java
+++ b/src/java/org/apache/cassandra/db/marshal/DoubleType.java
@@ -27,6 +27,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class DoubleType extends NumberType<Double>
 {
@@ -50,6 +53,18 @@
         return compareComposed(left, accessorL, right, accessorR, this);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return ByteSource.optionalSignedFixedLengthFloat(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLengthFloat(accessor, comparableBytes, 8);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
       // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java b/src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java
index 5df3600..e7a2360 100644
--- a/src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/DynamicCompositeType.java
@@ -19,9 +19,16 @@
 
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,6 +41,9 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static com.google.common.collect.Iterables.any;
 
@@ -60,7 +70,11 @@
 {
     private static final Logger logger = LoggerFactory.getLogger(DynamicCompositeType.class);
 
+    private static final ByteSource[] EMPTY_BYTE_SOURCE_ARRAY = new ByteSource[0];
+    private static final String REVERSED_TYPE = ReversedType.class.getSimpleName();
+
     private final Map<Byte, AbstractType<?>> aliases;
+    private final Map<AbstractType<?>, Byte> inverseMapping;
 
     // interning instances
     private static final ConcurrentHashMap<Map<Byte, AbstractType<?>>, DynamicCompositeType> instances = new ConcurrentHashMap<>();
@@ -81,6 +95,9 @@
     private DynamicCompositeType(Map<Byte, AbstractType<?>> aliases)
     {
         this.aliases = aliases;
+        this.inverseMapping = new HashMap<>();
+        for (Map.Entry<Byte, AbstractType<?>> en : aliases.entrySet())
+            this.inverseMapping.put(en.getValue(), en.getKey());
     }
 
     protected <V> boolean readIsStatic(V value, ValueAccessor<V> accessor)
@@ -197,6 +214,196 @@
         }
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        List<ByteSource> srcs = new ArrayList<>();
+        int length = accessor.size(data);
+
+        // statics go first
+        boolean isStatic = readIsStatic(data, accessor);
+        int offset = startingOffset(isStatic);
+        srcs.add(isStatic ? null : ByteSource.EMPTY);
+
+        byte lastEoc = 0;
+        int i = 0;
+        while (offset < length)
+        {
+            // Only the end-of-component byte of the last component of this composite can be non-zero, so the
+            // component before can't have a non-zero end-of-component byte.
+            assert lastEoc == 0 : lastEoc;
+
+            AbstractType<?> comp = getComparator(data, accessor, offset);
+            offset += getComparatorSize(i, data, accessor, offset);
+            // The comparable bytes for the component need to ensure comparisons consistent with
+            // AbstractCompositeType.compareCustom(ByteBuffer, ByteBuffer) and
+            // DynamicCompositeType.getComparator(int, ByteBuffer, ByteBuffer):
+            if (version == Version.LEGACY || !(comp instanceof ReversedType))
+            {
+                // ...most often that means just adding the short name of the type, followed by the full name of the type.
+                srcs.add(ByteSource.of(comp.getClass().getSimpleName(), version));
+                srcs.add(ByteSource.of(comp.getClass().getName(), version));
+            }
+            else
+            {
+                // ...however sometimes the component uses a complex type (currently the only supported complex type
+                // is ReversedType - we can't have elements that are of MapType, CompositeType, TupleType, etc.)...
+                ReversedType<?> reversedComp = (ReversedType<?>) comp;
+                // ...in this case, we need to add the short name of ReversedType before the short name of the base
+                // type, to ensure consistency with DynamicCompositeType.getComparator(int, ByteBuffer, ByteBuffer).
+                srcs.add(ByteSource.of(REVERSED_TYPE, version));
+                srcs.add(ByteSource.of(reversedComp.baseType.getClass().getSimpleName(), version));
+                srcs.add(ByteSource.of(reversedComp.baseType.getClass().getName(), version));
+            }
+            // Only then the payload of the component gets encoded.
+            int componentLength = accessor.getUnsignedShort(data, offset);
+            offset += 2;
+            srcs.add(comp.asComparableBytes(accessor, accessor.slice(data, offset, componentLength), version));
+            offset += componentLength;
+            // The end-of-component byte also takes part in the comparison, and therefore needs to be encoded.
+            lastEoc = accessor.getByte(data, offset);
+            offset += 1;
+            srcs.add(ByteSource.oneByte(version == Version.LEGACY ? lastEoc : lastEoc & 0xFF ^ 0x80));
+            ++i;
+        }
+
+        return ByteSource.withTerminatorMaybeLegacy(version, ByteSource.END_OF_STREAM, srcs.toArray(EMPTY_BYTE_SOURCE_ARRAY));
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, Version version)
+    {
+        // For ByteComparable.Version.LEGACY the terminator byte is ByteSource.END_OF_STREAM. Just like with
+        // CompositeType, this means that in multi-component sequences the terminator may be transformed to a regular
+        // component separator, but unlike CompositeType (where we have the expected number of types/components),
+        // this can make the end of the whole dynamic composite type indistinguishable from the end of a component
+        // somewhere in the middle of the dynamic composite type. Because of that, DynamicCompositeType elements
+        // cannot always be safely decoded using that encoding version.
+        // Even more so than with CompositeType, we just take advantage of the fact that we don't need to decode from
+        // Version.LEGACY, assume that we never do that, and assert it here.
+        assert version != Version.LEGACY;
+
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        // The first byte is the isStatic flag which we don't need but must consume to continue past it.
+        comparableBytes.next();
+
+        List<AbstractType<?>> types = new ArrayList<>();
+        List<V> values = new ArrayList<>();
+        byte lastEoc = 0;
+
+        for (int separator = comparableBytes.next(); separator != ByteSource.TERMINATOR; separator = comparableBytes.next())
+        {
+            // Solely the end-of-component byte of the last component of this composite can be non-zero.
+            assert lastEoc == 0 : lastEoc;
+
+            boolean isReversed = false;
+            // Decode the next type's simple class name that is encoded before its fully qualified class name (in order
+            // for comparisons to work correctly).
+            String simpleClassName = ByteSourceInverse.getString(ByteSourceInverse.nextComponentSource(comparableBytes, separator));
+            if (REVERSED_TYPE.equals(simpleClassName))
+            {
+                // Special-handle if the type is reversed (and decode the actual base type simple class name).
+                isReversed = true;
+                simpleClassName = ByteSourceInverse.getString(ByteSourceInverse.nextComponentSource(comparableBytes));
+            }
+
+            // Decode the type's fully qualified class name and parse the actual type from it.
+            String fullClassName = ByteSourceInverse.getString(ByteSourceInverse.nextComponentSource(comparableBytes));
+            assert fullClassName.endsWith(simpleClassName);
+            if (isReversed)
+                fullClassName = REVERSED_TYPE + '(' + fullClassName + ')';
+            AbstractType<?> type = TypeParser.parse(fullClassName);
+            assert type != null;
+            types.add(type);
+
+            // Decode the payload from this type.
+            V value = type.fromComparableBytes(accessor, ByteSourceInverse.nextComponentSource(comparableBytes), version);
+            values.add(value);
+
+            // Also decode the corresponding end-of-component byte - the last one we decode will be taken into
+            // account when we deserialize the decoded data into an object.
+            lastEoc = ByteSourceInverse.getSignedByte(ByteSourceInverse.nextComponentSource(comparableBytes));
+        }
+        return build(accessor, types, inverseMapping, values, lastEoc);
+    }
+
+    public static ByteBuffer build(List<String> types, List<ByteBuffer> values)
+    {
+        return build(ByteBufferAccessor.instance,
+                     Lists.transform(types, TypeParser::parse),
+                     Collections.emptyMap(),
+                     values,
+                     (byte) 0);
+    }
+
+    @VisibleForTesting
+    public static <V> V build(ValueAccessor<V> accessor,
+                              List<AbstractType<?>> types,
+                              Map<AbstractType<?>, Byte> inverseMapping,
+                              List<V> values,
+                              byte lastEoc)
+    {
+        assert types.size() == values.size();
+
+        int numComponents = types.size();
+        // Compute the total number of bytes that we'll need to store the types and their payloads.
+        int totalLength = 0;
+        for (int i = 0; i < numComponents; ++i)
+        {
+            AbstractType<?> type = types.get(i);
+            Byte alias = inverseMapping.get(type);
+            int typeNameLength = alias == null ? type.toString().getBytes(StandardCharsets.UTF_8).length : 0;
+            // The type data will be stored by means of the type's fully qualified name, not by aliasing, so:
+            //   1. The type data header should be the fully qualified name length in bytes.
+            //   2. The length should be small enough so that it fits in 15 bits (2 bytes with the first bit zero).
+            assert typeNameLength <= 0x7FFF;
+            int valueLength = accessor.size(values.get(i));
+            // The value length should also expect its first bit to be 0, as the length should be stored as a signed
+            // 2-byte value (short).
+            assert valueLength <= 0x7FFF;
+            totalLength += 2 + typeNameLength + 2 + valueLength + 1;
+        }
+
+        V result = accessor.allocate(totalLength);
+        int offset = 0;
+        for (int i = 0; i < numComponents; ++i)
+        {
+            AbstractType<?> type = types.get(i);
+            Byte alias = inverseMapping.get(type);
+            if (alias == null)
+            {
+                // Write the type data (2-byte length header + the fully qualified type name in UTF-8).
+                byte[] typeNameBytes = type.toString().getBytes(StandardCharsets.UTF_8);
+                accessor.putShort(result,
+                                  offset,
+                                  (short) typeNameBytes.length); // this should work fine also if length >= 32768
+                offset += 2;
+                accessor.copyByteArrayTo(typeNameBytes, 0, result, offset, typeNameBytes.length);
+                offset += typeNameBytes.length;
+            }
+            else
+            {
+                accessor.putShort(result, offset, (short) (alias | 0x8000));
+                offset += 2;
+            }
+
+            // Write the type payload data (2-byte length header + the payload).
+            V value = values.get(i);
+            int bytesToCopy = accessor.size(value);
+            accessor.putShort(result, offset, (short) bytesToCopy);
+            offset += 2;
+            accessor.copyTo(value, 0, result, accessor, offset, bytesToCopy);
+            offset += bytesToCopy;
+
+            // Write the end-of-component byte.
+            accessor.putByte(result, offset, i != numComponents - 1 ? (byte) 0 : lastEoc);
+            offset += 1;
+        }
+        return result;
+    }
+
     protected ParsedComparator parseComparator(int i, String part)
     {
         return new DynamicParsedComparator(part);
diff --git a/src/java/org/apache/cassandra/db/marshal/EmptyType.java b/src/java/org/apache/cassandra/db/marshal/EmptyType.java
index 357b6e8..dcc57b7 100644
--- a/src/java/org/apache/cassandra/db/marshal/EmptyType.java
+++ b/src/java/org/apache/cassandra/db/marshal/EmptyType.java
@@ -33,6 +33,8 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.NoSpamLogger;
 
 /**
@@ -68,6 +70,18 @@
 
     private EmptyType() {super(ComparisonType.CUSTOM);} // singleton
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return null;
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return accessor.empty();
+    }
+
     public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
     {
         return 0;
diff --git a/src/java/org/apache/cassandra/db/marshal/FloatType.java b/src/java/org/apache/cassandra/db/marshal/FloatType.java
index 35abee0..2adb127 100644
--- a/src/java/org/apache/cassandra/db/marshal/FloatType.java
+++ b/src/java/org/apache/cassandra/db/marshal/FloatType.java
@@ -27,6 +27,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 
 public class FloatType extends NumberType<Float>
@@ -51,6 +54,18 @@
         return compareComposed(left, accessorL, right, accessorR, this);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return ByteSource.optionalSignedFixedLengthFloat(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLengthFloat(accessor, comparableBytes, 4);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
       // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/Int32Type.java b/src/java/org/apache/cassandra/db/marshal/Int32Type.java
index 98f4c83..6dee26e 100644
--- a/src/java/org/apache/cassandra/db/marshal/Int32Type.java
+++ b/src/java/org/apache/cassandra/db/marshal/Int32Type.java
@@ -28,6 +28,9 @@
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class Int32Type extends NumberType<Integer>
 {
@@ -55,6 +58,18 @@
         return ValueAccessor.compare(left, accessorL, right, accessorR);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return ByteSource.optionalSignedFixedLengthNumber(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLength(accessor, comparableBytes, 4);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/IntegerType.java b/src/java/org/apache/cassandra/db/marshal/IntegerType.java
index 4c913d5..b52bda8 100644
--- a/src/java/org/apache/cassandra/db/marshal/IntegerType.java
+++ b/src/java/org/apache/cassandra/db/marshal/IntegerType.java
@@ -30,11 +30,23 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public final class IntegerType extends NumberType<BigInteger>
 {
     public static final IntegerType instance = new IntegerType();
 
+    // Constants or escaping values needed to encode/decode variable-length integers in our custom byte-ordered
+    // encoding scheme.
+    private static final int POSITIVE_VARINT_HEADER = 0x80;
+    private static final int NEGATIVE_VARINT_LENGTH_HEADER = 0x00;
+    private static final int POSITIVE_VARINT_LENGTH_HEADER = 0xFF;
+    private static final byte BIG_INTEGER_NEGATIVE_LEADING_ZERO = (byte) 0xFF;
+    private static final byte BIG_INTEGER_POSITIVE_LEADING_ZERO = (byte) 0x00;
+    public static final int FULL_FORM_THRESHOLD = 7;
+
     private static <V> int findMostSignificantByte(V value, ValueAccessor<V> accessor)
     {
         int len = accessor.size(value) - 1;
@@ -131,6 +143,301 @@
         return 0;
     }
 
+    /**
+     * Constructs a byte-comparable representation of the number.
+     *
+     * In the current format we represent it:
+     *    directly as varint, if the length is 6 or smaller (the encoding has non-00/FF first byte)
+     *    <signbyte><length as unsigned integer - 7><7 or more bytes>, otherwise
+     * where <signbyte> is 00 for negative numbers and FF for positive ones, and the length's bytes are inverted if
+     * the number is negative (so that longer length sorts smaller).
+     *
+     * Because we present the sign separately, we don't need to include 0x00 prefix for positive integers whose first
+     * byte is >= 0x80 or 0xFF prefix for negative integers whose first byte is < 0x80. Note that we do this before
+     * taking the length for the purposes of choosing between varint and full-form encoding.
+     *
+     * The representations are prefix-free, because the choice between varint and full-form encoding is determined by
+     * the first byte where varints are properly ordered between full-form negative and full-form positive, varint
+     * encoding is prefix-free, and full-form representations of different length always have length bytes that differ.
+     *
+     * Examples:
+     *    -1            as 7F
+     *    0             as 80
+     *    1             as 81
+     *    127           as C07F
+     *    255           as C0FF
+     *    2^32-1        as F8FFFFFFFF
+     *    2^32          as F900000000
+     *    2^56-1        as FEFFFFFFFFFFFFFF
+     *    2^56          as FF000100000000000000
+     *
+     * See {@link #asComparableBytesLegacy} for description of the legacy format.
+     */
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        final int limit = accessor.size(data);
+        if (limit == 0)
+            return null;
+
+        // skip any leading sign-only byte(s)
+        int p = 0;
+        final byte signbyte = accessor.getByte(data, p);
+        if (signbyte == BIG_INTEGER_NEGATIVE_LEADING_ZERO || signbyte == BIG_INTEGER_POSITIVE_LEADING_ZERO)
+        {
+            while (p + 1 < limit)
+            {
+                if (accessor.getByte(data, ++p) != signbyte)
+                    break;
+            }
+        }
+
+        if (version != ByteComparable.Version.LEGACY)
+            return (limit - p < FULL_FORM_THRESHOLD)
+                   ? encodeAsVarInt(accessor, data, limit)
+                   : asComparableBytesCurrent(accessor, data, p, limit, (signbyte >> 7) & 0xFF);
+        else
+            return asComparableBytesLegacy(accessor, data, p, limit, signbyte);
+    }
+
+    /**
+     * Encode the BigInteger stored in the given buffer as a variable-length signed integer.
+     * The length of the number is given in the limit argument, and must be <= 8.
+     */
+    private <V> ByteSource encodeAsVarInt(ValueAccessor<V> accessor, V data, int limit)
+    {
+        long v;
+        switch (limit)
+        {
+            case 1:
+                v = accessor.getByte(data, 0);
+                break;
+            case 2:
+                v = accessor.getShort(data, 0);
+                break;
+            case 3:
+                v = (accessor.getShort(data, 0) << 8) | (accessor.getByte(data, 2) & 0xFF);
+                break;
+            case 4:
+                v = accessor.getInt(data, 0);
+                break;
+            case 5:
+                v = ((long) accessor.getInt(data, 0) << 8) | (accessor.getByte(data, 4) & 0xFF);
+                break;
+            case 6:
+                v = ((long) accessor.getInt(data, 0) << 16) | (accessor.getShort(data, 4) & 0xFFFF);
+                break;
+            case 7:
+                v = ((long) accessor.getInt(data, 0) << 24) | ((accessor.getShort(data, 4) & 0xFFFF) << 8) | (accessor.getByte(data, 6) & 0xFF);
+                break;
+            case 8:
+                // This is not reachable within the encoding; added for completeness.
+                v = accessor.getLong(data, 0);
+                break;
+            default:
+                throw new AssertionError();
+        }
+        return ByteSource.variableLengthInteger(v);
+    }
+
+    /**
+     * Constructs a full-form byte-comparable representation of the number in the current format.
+     *
+     * This contains:
+     *    <signbyte><length as unsigned integer - 7><7 or more bytes>
+     * where <signbyte> is 00 for negative numbers and FF for positive ones, and the length's bytes are inverted if
+     * the number is negative (so that longer length sorts smaller).
+     *
+     * Because we present the sign separately, we don't need to include 0x00 prefix for positive integers whose first
+     * byte is >= 0x80 or 0xFF prefix for negative integers whose first byte is < 0x80.
+     *
+     * The representations are prefix-free, because representations of different length always have length bytes that
+     * differ.
+     */
+    private <V> ByteSource asComparableBytesCurrent(ValueAccessor<V> accessor, V data, int startpos, int limit, int signbyte)
+    {
+        // start with sign as a byte, then variable-length-encoded length, then bytes (stripped leading sign)
+        return new ByteSource()
+        {
+            int pos = -2;
+            ByteSource lengthEncoding = new VariableLengthUnsignedInteger(limit - startpos - FULL_FORM_THRESHOLD);
+
+            @Override
+            public int next()
+            {
+                if (pos == -2)
+                {
+                    ++pos;
+                    return signbyte ^ 0xFF; // 00 for negative/FF for positive (01-FE for direct varint encoding)
+                }
+                else if (pos == -1)
+                {
+                    int nextByte = lengthEncoding.next();
+                    if (nextByte != END_OF_STREAM)
+                        return nextByte ^ signbyte;
+                    pos = startpos;
+                }
+
+                if (pos == limit)
+                    return END_OF_STREAM;
+
+                return accessor.getByte(data, pos++) & 0xFF;
+            }
+        };
+    }
+
+    /**
+     * Constructs a byte-comparable representation of the number in the legacy format.
+     * We represent it as
+     *    <zero or more length_bytes where length = 128> <length_byte> <first_significant_byte> <zero or more bytes>
+     * where a length_byte is:
+     *    - 0x80 + (length - 1) for positive numbers (so that longer length sorts bigger)
+     *    - 0x7F - (length - 1) for negative numbers (so that longer length sorts smaller)
+     *
+     * Because we include the sign in the length byte:
+     * - unlike fixed-length ints, we don't need to sign-invert the first significant byte,
+     * - unlike BigInteger, we don't need to include 0x00 prefix for positive integers whose first byte is >= 0x80
+     *   or 0xFF prefix for negative integers whose first byte is < 0x80.
+     *
+     * The representations are prefix-free, because representations of different length always have length bytes that
+     * differ.
+     *
+     * Examples:
+     *    0             as 8000
+     *    1             as 8001
+     *    127           as 807F
+     *    255           as 80FF
+     *    2^31-1        as 837FFFFFFF
+     *    2^31          as 8380000000
+     *    2^32          as 840100000000
+     */
+    private <V> ByteSource asComparableBytesLegacy(ValueAccessor<V> accessor, V data, int startpos, int limit, int signbyte)
+    {
+        return new ByteSource()
+        {
+            int pos = startpos;
+            int sizeToReport = limit - startpos;
+            boolean sizeReported = false;
+
+            public int next()
+            {
+                if (!sizeReported)
+                {
+                    if (sizeToReport >= 128)
+                    {
+                        sizeToReport -= 128;
+                        return signbyte >= 0
+                               ? POSITIVE_VARINT_LENGTH_HEADER
+                               : NEGATIVE_VARINT_LENGTH_HEADER;
+                    }
+                    else
+                    {
+                        sizeReported = true;
+                        return signbyte >= 0
+                               ? POSITIVE_VARINT_HEADER + (sizeToReport - 1)
+                               : POSITIVE_VARINT_HEADER - sizeToReport;
+                    }
+                }
+
+                if (pos == limit)
+                    return END_OF_STREAM;
+
+                return accessor.getByte(data, pos++) & 0xFF;
+            }
+        };
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        assert version != ByteComparable.Version.LEGACY;
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        // Consume the first byte to determine whether the encoded number is positive and
+        // start iterating through the length header bytes and collecting the number of value bytes.
+        int sign = comparableBytes.peek() ^ 0xFF;   // FF if negative, 00 if positive
+        if (sign != 0xFF && sign != 0x00)
+            return extractVarIntBytes(accessor, ByteSourceInverse.getVariableLengthInteger(comparableBytes));
+
+        // consume the sign byte
+        comparableBytes.next();
+
+        // Read the length (inverted if the number is negative)
+        int valueBytes = Math.toIntExact(ByteSourceInverse.getVariableLengthUnsignedIntegerXoring(comparableBytes, sign) + FULL_FORM_THRESHOLD);
+        // Get the bytes.
+        return extractBytes(accessor, comparableBytes, sign, valueBytes);
+    }
+
+    private <V> V extractVarIntBytes(ValueAccessor<V> accessor, long value)
+    {
+        int length = (64 - Long.numberOfLeadingZeros(value ^ (value >> 63)) + 8) / 8;   // number of bytes needed: 7 bits -> one byte, 8 bits -> 2 bytes
+        V buf = accessor.allocate(length);
+        switch (length)
+        {
+            case 1:
+                accessor.putByte(buf, 0, (byte) value);
+                break;
+            case 2:
+                accessor.putShort(buf, 0, (short) value);
+                break;
+            case 3:
+                accessor.putShort(buf, 0, (short) (value >> 8));
+                accessor.putByte(buf, 2, (byte) value);
+                break;
+            case 4:
+                accessor.putInt(buf, 0, (int) value);
+                break;
+            case 5:
+                accessor.putInt(buf, 0, (int) (value >> 8));
+                accessor.putByte(buf, 4, (byte) value);
+                break;
+            case 6:
+                accessor.putInt(buf, 0, (int) (value >> 16));
+                accessor.putShort(buf, 4, (short) value);
+                break;
+            case 7:
+                accessor.putInt(buf, 0, (int) (value >> 24));
+                accessor.putShort(buf, 4, (short) (value >> 8));
+                accessor.putByte(buf, 6, (byte) value);
+                break;
+            case 8:
+                // This is not reachable within the encoding; added for completeness.
+                accessor.putLong(buf, 0, value);
+                break;
+            default:
+                throw new AssertionError();
+        }
+        return buf;
+    }
+
+    private <V> V extractBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, int sign, int valueBytes)
+    {
+        int writtenBytes = 0;
+        V buf;
+        // Add "leading zero" if needed (i.e. in case the leading byte of a positive number corresponds to a negative
+        // value, or in case the leading byte of a negative number corresponds to a non-negative value).
+        // Size the array containing all the value bytes accordingly.
+        int curr = comparableBytes.next();
+        if ((curr & 0x80) != (sign & 0x80))
+        {
+            ++valueBytes;
+            buf = accessor.allocate(valueBytes);
+            accessor.putByte(buf, writtenBytes++, (byte) sign);
+        }
+        else
+            buf = accessor.allocate(valueBytes);
+        // Don't forget to add the first consumed value byte after determining whether leading zero should be added
+        // and sizing the value bytes array.
+        accessor.putByte(buf, writtenBytes++, (byte) curr);
+
+        // Consume exactly the number of expected value bytes.
+        while (writtenBytes < valueBytes)
+            accessor.putByte(buf, writtenBytes++, (byte) comparableBytes.next());
+
+        return buf;
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java b/src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java
index 6dd4161..81ec9d9 100644
--- a/src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java
+++ b/src/java/org/apache/cassandra/db/marshal/LexicalUUIDType.java
@@ -26,6 +26,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.UUIDSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class LexicalUUIDType extends AbstractType<UUID>
 {
@@ -48,6 +51,46 @@
         return accessorL.toUUID(left).compareTo(accessorR.toUUID(right));
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        if (data == null || accessor.isEmpty(data))
+            return null;
+
+        // fixed-length (hence prefix-free) representation, but
+        // we have to sign-flip the highest bytes of the two longs
+        return new ByteSource()
+        {
+            int bufpos = 0;
+
+            public int next()
+            {
+                if (bufpos >= accessor.size(data))
+                    return END_OF_STREAM;
+                int v = accessor.getByte(data, bufpos) & 0xFF;
+                if (bufpos == 0 || bufpos == 8)
+                    v ^= 0x80;
+                ++bufpos;
+                return v;
+            }
+        };
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        // Optional-style encoding of empty values as null sources
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        long hiBits = ByteSourceInverse.getSignedLong(comparableBytes);
+        long loBits = ByteSourceInverse.getSignedLong(comparableBytes);
+
+        // Lexical UUIDs are stored as just two signed longs. The decoding of these longs flips their sign bit back, so
+        // they can directly be used for constructing the original UUID.
+        return UUIDType.makeUuidBytes(accessor, hiBits, loBits);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/ListType.java b/src/java/org/apache/cassandra/db/marshal/ListType.java
index 281f7ee..f795def 100644
--- a/src/java/org/apache/cassandra/db/marshal/ListType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ListType.java
@@ -18,21 +18,24 @@
 package org.apache.cassandra.db.marshal;
 
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.cassandra.cql3.Json;
 import org.apache.cassandra.cql3.Lists;
 import org.apache.cassandra.cql3.Term;
-import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
-import org.apache.cassandra.serializers.CollectionSerializer;
 import org.apache.cassandra.serializers.ListSerializer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.utils.TimeUUID;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class ListType<T> extends CollectionType<List<T>>
 {
@@ -171,29 +174,16 @@
         return compareListOrSet(elements, left, accessorL, right, accessorR);
     }
 
-    static <VL, VR> int compareListOrSet(AbstractType<?> elementsComparator, VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
     {
-        // Note that this is only used if the collection is frozen
-        if (accessorL.isEmpty(left) || accessorR.isEmpty(right))
-            return Boolean.compare(accessorR.isEmpty(right), accessorL.isEmpty(left));
+        return asComparableBytesListOrSet(getElementsType(), accessor, data, version);
+    }
 
-        int sizeL = CollectionSerializer.readCollectionSize(left, accessorL, ProtocolVersion.V3);
-        int offsetL = CollectionSerializer.sizeOfCollectionSize(sizeL, ProtocolVersion.V3);
-        int sizeR = CollectionSerializer.readCollectionSize(right, accessorR, ProtocolVersion.V3);
-        int offsetR = TypeSizes.INT_SIZE;
-
-        for (int i = 0; i < Math.min(sizeL, sizeR); i++)
-        {
-            VL v1 = CollectionSerializer.readValue(left, accessorL, offsetL, ProtocolVersion.V3);
-            offsetL += CollectionSerializer.sizeOfValue(v1, accessorL, ProtocolVersion.V3);
-            VR v2 = CollectionSerializer.readValue(right, accessorR, offsetR, ProtocolVersion.V3);
-            offsetR += CollectionSerializer.sizeOfValue(v2, accessorR, ProtocolVersion.V3);
-            int cmp = elementsComparator.compare(v1, accessorL, v2, accessorR);
-            if (cmp != 0)
-                return cmp;
-        }
-
-        return sizeL == sizeR ? 0 : (sizeL < sizeR ? -1 : 1);
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, Version version)
+    {
+        return fromComparableBytesListOrSet(accessor, comparableBytes, version, getElementsType());
     }
 
     @Override
@@ -242,23 +232,6 @@
         return new Lists.DelayedValue(terms);
     }
 
-    public static String setOrListToJsonString(ByteBuffer buffer, AbstractType elementsType, ProtocolVersion protocolVersion)
-    {
-        ByteBuffer value = buffer.duplicate();
-        StringBuilder sb = new StringBuilder("[");
-        int size = CollectionSerializer.readCollectionSize(value, protocolVersion);
-        int offset = CollectionSerializer.sizeOfCollectionSize(size, protocolVersion);
-        for (int i = 0; i < size; i++)
-        {
-            if (i > 0)
-                sb.append(", ");
-            ByteBuffer element = CollectionSerializer.readValue(value, ByteBufferAccessor.instance, offset, protocolVersion);
-            offset += CollectionSerializer.sizeOfValue(element, ByteBufferAccessor.instance, protocolVersion);
-            sb.append(elementsType.toJSONString(element, protocolVersion));
-        }
-        return sb.append("]").toString();
-    }
-
     public ByteBuffer getSliceFromSerialized(ByteBuffer collection, ByteBuffer from, ByteBuffer to)
     {
         // We don't support slicing on lists so we don't need that function
diff --git a/src/java/org/apache/cassandra/db/marshal/LongType.java b/src/java/org/apache/cassandra/db/marshal/LongType.java
index ad539f7..6bf5e9e 100644
--- a/src/java/org/apache/cassandra/db/marshal/LongType.java
+++ b/src/java/org/apache/cassandra/db/marshal/LongType.java
@@ -28,6 +28,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class LongType extends NumberType<Long>
 {
@@ -57,6 +60,28 @@
         return ValueAccessor.compare(left, accessorL, right, accessorR);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+        if (version == ByteComparable.Version.LEGACY)
+            return ByteSource.signedFixedLengthNumber(accessor, data);
+        else
+            return ByteSource.variableLengthInteger(accessor.getLong(data, 0));
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        if (comparableBytes == null)
+            return accessor.empty();
+        if (version == ByteComparable.Version.LEGACY)
+            return ByteSourceInverse.getSignedFixedLength(accessor, comparableBytes, 8);
+        else
+            return accessor.valueOf(ByteSourceInverse.getVariableLengthInteger(comparableBytes));
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/MapType.java b/src/java/org/apache/cassandra/db/marshal/MapType.java
index 9473e29..be74ff1 100644
--- a/src/java/org/apache/cassandra/db/marshal/MapType.java
+++ b/src/java/org/apache/cassandra/db/marshal/MapType.java
@@ -28,9 +28,13 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.CollectionSerializer;
-import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.MapSerializer;
+import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.Pair;
 
 public class MapType<K, V> extends CollectionType<Map<K, V>>
@@ -215,7 +219,71 @@
                 return cmp;
         }
 
-        return sizeL == sizeR ? 0 : (sizeL < sizeR ? -1 : 1);
+        return Integer.compare(sizeL, sizeR);
+    }
+
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        return asComparableBytesMap(getKeysType(), getValuesType(), accessor, data, version);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, Version version)
+    {
+        return fromComparableBytesMap(accessor, comparableBytes, version, getKeysType(), getValuesType());
+    }
+
+    static <V> ByteSource asComparableBytesMap(AbstractType<?> keysComparator,
+                                               AbstractType<?> valuesComparator,
+                                               ValueAccessor<V> accessor,
+                                               V data,
+                                               Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        ProtocolVersion protocolVersion = ProtocolVersion.V3;
+        int offset = 0;
+        int size = CollectionSerializer.readCollectionSize(data, accessor, protocolVersion);
+        offset += CollectionSerializer.sizeOfCollectionSize(size, protocolVersion);
+        ByteSource[] srcs = new ByteSource[size * 2];
+        for (int i = 0; i < size; ++i)
+        {
+            V k = CollectionSerializer.readValue(data, accessor, offset, protocolVersion);
+            offset += CollectionSerializer.sizeOfValue(k, accessor, protocolVersion);
+            srcs[i * 2 + 0] = keysComparator.asComparableBytes(accessor, k, version);
+            V v = CollectionSerializer.readValue(data, accessor, offset, protocolVersion);
+            offset += CollectionSerializer.sizeOfValue(v, accessor, protocolVersion);
+            srcs[i * 2 + 1] = valuesComparator.asComparableBytes(accessor, v, version);
+        }
+        return ByteSource.withTerminatorMaybeLegacy(version, 0x00, srcs);
+    }
+
+    static <V> V fromComparableBytesMap(ValueAccessor<V> accessor,
+                                        ByteSource.Peekable comparableBytes,
+                                        Version version,
+                                        AbstractType<?> keysComparator,
+                                        AbstractType<?> valuesComparator)
+    {
+        if (comparableBytes == null)
+            return accessor.empty();
+        assert version != ByteComparable.Version.LEGACY; // legacy translation is not reversible
+
+        List<V> buffers = new ArrayList<>();
+        int separator = comparableBytes.next();
+        while (separator != ByteSource.TERMINATOR)
+        {
+            buffers.add(ByteSourceInverse.nextComponentNull(separator)
+                        ? null
+                        : keysComparator.fromComparableBytes(accessor, comparableBytes, version));
+            separator = comparableBytes.next();
+            buffers.add(ByteSourceInverse.nextComponentNull(separator)
+                        ? null
+                        : valuesComparator.fromComparableBytes(accessor, comparableBytes, version));
+            separator = comparableBytes.next();
+        }
+        return CollectionSerializer.pack(buffers, accessor,buffers.size() / 2, ProtocolVersion.V3);
     }
 
     @Override
@@ -286,7 +354,7 @@
     {
         ByteBuffer value = buffer.duplicate();
         StringBuilder sb = new StringBuilder("{");
-        int size = CollectionSerializer.readCollectionSize(value, protocolVersion);
+        int size = CollectionSerializer.readCollectionSize(value, ByteBufferAccessor.instance, protocolVersion);
         int offset = CollectionSerializer.sizeOfCollectionSize(size, protocolVersion);
         for (int i = 0; i < size; i++)
         {
diff --git a/src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java b/src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java
index 89241b4..02c28e7 100644
--- a/src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java
+++ b/src/java/org/apache/cassandra/db/marshal/PartitionerDefinedOrder.java
@@ -22,11 +22,15 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.Term;
+import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.PartitionPosition;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.FBUtilities;
 
 /** for sorting columns representing row keys in the row ordering as determined by a partitioner.
@@ -94,6 +98,33 @@
     }
 
     @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        // Partitioners work with ByteBuffers only.
+        ByteBuffer buf = ByteBufferAccessor.instance.convert(data, accessor);
+        if (version != Version.LEGACY)
+        {
+            // For ByteComparable.Version.OSS42 and above we encode an empty key with a null byte source. This
+            // way we avoid the need to special-handle a sentinel value when we decode the byte source for such a key
+            // (e.g. for ByteComparable.Version.Legacy we use the minimum key bound of the partitioner's minimum token as
+            // a sentinel value, and that results in the need to go twice through the byte source that is being
+            // decoded).
+            return buf.hasRemaining() ? partitioner.decorateKey(buf).asComparableBytes(version) : null;
+        }
+        return PartitionPosition.ForKey.get(buf, partitioner).asComparableBytes(version);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        assert version != Version.LEGACY;
+        if (comparableBytes == null)
+            return accessor.empty();
+        byte[] keyBytes = DecoratedKey.keyFromByteSource(comparableBytes, version, partitioner);
+        return accessor.valueOf(keyBytes);
+    }
+
+    @Override
     public void validate(ByteBuffer bytes) throws MarshalException
     {
         throw new IllegalStateException("You shouldn't be validating this.");
diff --git a/src/java/org/apache/cassandra/db/marshal/ReversedType.java b/src/java/org/apache/cassandra/db/marshal/ReversedType.java
index ceea84a..eac800a 100644
--- a/src/java/org/apache/cassandra/db/marshal/ReversedType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ReversedType.java
@@ -28,6 +28,8 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class ReversedType<T> extends AbstractType<T>
 {
@@ -63,6 +65,32 @@
         return baseType.isEmptyValueMeaningless();
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        ByteSource src = baseType.asComparableBytes(accessor, data, version);
+        if (src == null)    // Note: this will only compare correctly if used within a sequence
+            return null;
+        // Invert all bytes.
+        // The comparison requirements for the original type ensure that this encoding will compare correctly with
+        // respect to the reversed comparator function (and, specifically, prefixes of escaped byte-ordered types will
+        // compare as larger). Additionally, the weak prefix-freedom requirement ensures this encoding will also be
+        // weakly prefix-free.
+        return () ->
+        {
+            int v = src.next();
+            if (v == ByteSource.END_OF_STREAM)
+                return v;
+            return v ^ 0xFF;
+        };
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return baseType.fromComparableBytes(accessor, ReversedPeekableByteSource.of(comparableBytes), version);
+    }
+
     public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
     {
         return baseType.compare(right, accessorR, left, accessorL);
@@ -156,4 +184,38 @@
     {
         return getClass().getName() + "(" + baseType + ")";
     }
+
+    private static final class ReversedPeekableByteSource extends ByteSource.Peekable
+    {
+        private final ByteSource.Peekable original;
+
+        static ByteSource.Peekable of(ByteSource.Peekable original)
+        {
+            return original != null ? new ReversedPeekableByteSource(original) : null;
+        }
+
+        private ReversedPeekableByteSource(ByteSource.Peekable original)
+        {
+            super(null);
+            this.original = original;
+        }
+
+        @Override
+        public int next()
+        {
+            int v = original.next();
+            if (v != END_OF_STREAM)
+                return v ^ 0xFF;
+            return END_OF_STREAM;
+        }
+
+        @Override
+        public int peek()
+        {
+            int v = original.peek();
+            if (v != END_OF_STREAM)
+                return v ^ 0xFF;
+            return END_OF_STREAM;
+        }
+    }
 }
diff --git a/src/java/org/apache/cassandra/db/marshal/SetType.java b/src/java/org/apache/cassandra/db/marshal/SetType.java
index e5bdada..67699ac 100644
--- a/src/java/org/apache/cassandra/db/marshal/SetType.java
+++ b/src/java/org/apache/cassandra/db/marshal/SetType.java
@@ -30,6 +30,8 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.SetSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class SetType<T> extends CollectionType<Set<T>>
 {
@@ -154,7 +156,19 @@
 
     public <VL, VR> int compareCustom(VL left, ValueAccessor<VL> accessorL, VR right, ValueAccessor<VR> accessorR)
     {
-        return ListType.compareListOrSet(elements, left, accessorL, right, accessorR);
+        return compareListOrSet(elements, left, accessorL, right, accessorR);
+    }
+
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return asComparableBytesListOrSet(getElementsType(), accessor, data, version);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return fromComparableBytesListOrSet(accessor, comparableBytes, version, getElementsType());
     }
 
     public SetSerializer<T> getSerializer()
@@ -210,6 +224,6 @@
     @Override
     public String toJSONString(ByteBuffer buffer, ProtocolVersion protocolVersion)
     {
-        return ListType.setOrListToJsonString(buffer, elements, protocolVersion);
+        return setOrListToJsonString(buffer, elements, protocolVersion);
     }
 }
diff --git a/src/java/org/apache/cassandra/db/marshal/ShortType.java b/src/java/org/apache/cassandra/db/marshal/ShortType.java
index 03dcf5d..013fa95 100644
--- a/src/java/org/apache/cassandra/db/marshal/ShortType.java
+++ b/src/java/org/apache/cassandra/db/marshal/ShortType.java
@@ -28,6 +28,9 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 public class ShortType extends NumberType<Short>
 {
@@ -46,6 +49,19 @@
         return ValueAccessor.compare(left, accessorL, right, accessorR);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        // This type does not allow non-present values, but we do just to avoid future complexity.
+        return ByteSource.optionalSignedFixedLengthNumber(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLength(accessor, comparableBytes, 2);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java b/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
index 8f1d677..a0de2c2 100644
--- a/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
+++ b/src/java/org/apache/cassandra/db/marshal/SimpleDateType.java
@@ -28,6 +28,10 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 
@@ -37,6 +41,20 @@
 
     SimpleDateType() {super(ComparisonType.BYTE_ORDER);} // singleton
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        // While BYTE_ORDER would still work for this type, making use of the fixed length is more efficient.
+        // This type does not allow non-present values, but we do just to avoid future complexity.
+        return ByteSource.optionalFixedLength(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalFixedLength(accessor, comparableBytes, 4);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
         return ByteBufferUtil.bytes(SimpleDateSerializer.dateStringToDays(source));
diff --git a/src/java/org/apache/cassandra/db/marshal/TimeType.java b/src/java/org/apache/cassandra/db/marshal/TimeType.java
index fd8fac4..f029b8b 100644
--- a/src/java/org/apache/cassandra/db/marshal/TimeType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TimeType.java
@@ -28,6 +28,10 @@
 import org.apache.cassandra.serializers.TypeSerializer;
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.transport.ProtocolVersion;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 /**
  * Nanosecond resolution time values
@@ -43,6 +47,20 @@
     }
 
     @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, Version version)
+    {
+        // While BYTE_ORDER would still work for this type, making use of the fixed length is more efficient.
+        // This type does not allow non-present values, but we do just to avoid future complexity.
+        return ByteSource.optionalFixedLength(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalFixedLength(accessor, comparableBytes, 8);
+    }
+
+    @Override
     public boolean isValueCompatibleWithInternal(AbstractType<?> otherType)
     {
         return this == otherType || otherType == LongType.instance;
diff --git a/src/java/org/apache/cassandra/db/marshal/TimestampType.java b/src/java/org/apache/cassandra/db/marshal/TimestampType.java
index ccf1da3..5bca7b1 100644
--- a/src/java/org/apache/cassandra/db/marshal/TimestampType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TimestampType.java
@@ -32,6 +32,9 @@
 import org.apache.cassandra.serializers.TimestampSerializer;
 import org.apache.cassandra.transport.ProtocolVersion;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static org.apache.cassandra.cql3.statements.RequestValidations.invalidRequest;
 
@@ -60,6 +63,18 @@
         return LongType.compareLongs(left, accessorL, right, accessorR);
     }
 
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        return ByteSource.optionalSignedFixedLengthNumber(accessor, data);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        return ByteSourceInverse.getOptionalSignedFixedLength(accessor, comparableBytes, 8);
+    }
+
     public ByteBuffer fromString(String source) throws MarshalException
     {
       // Return an empty ByteBuffer for an empty string.
diff --git a/src/java/org/apache/cassandra/db/marshal/TupleType.java b/src/java/org/apache/cassandra/db/marshal/TupleType.java
index cc08487..c203770 100644
--- a/src/java/org/apache/cassandra/db/marshal/TupleType.java
+++ b/src/java/org/apache/cassandra/db/marshal/TupleType.java
@@ -30,11 +30,12 @@
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.SyntaxException;
 import org.apache.cassandra.serializers.*;
 import org.apache.cassandra.transport.ProtocolVersion;
-import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 
 import static com.google.common.collect.Iterables.any;
 import static com.google.common.collect.Iterables.transform;
@@ -200,47 +201,136 @@
         return true;
     }
 
-    /**
-     * Split a tuple value into its component values.
-     */
-    public ByteBuffer[] split(ByteBuffer value)
+    @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
     {
-        return split(value, size(), this);
+        switch (version)
+        {
+            case LEGACY:
+                return asComparableBytesLegacy(accessor, data);
+            case OSS42:
+                return asComparableBytesNew(accessor, data, version);
+            default:
+                throw new AssertionError();
+        }
+    }
+
+    private <V> ByteSource asComparableBytesLegacy(ValueAccessor<V> accessor, V data)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        V[] bufs = split(accessor, data);  // this may be shorter than types.size -- other srcs remain null in that case
+        ByteSource[] srcs = new ByteSource[types.size()];
+        for (int i = 0; i < bufs.length; ++i)
+            srcs[i] = bufs[i] != null ? types.get(i).asComparableBytes(accessor, bufs[i], ByteComparable.Version.LEGACY) : null;
+
+        // We always have a fixed number of sources, with the trailing ones possibly being nulls.
+        // This can only result in a prefix if the last type in the tuple allows prefixes. Since that type is required
+        // to be weakly prefix-free, so is the tuple.
+        return ByteSource.withTerminatorLegacy(ByteSource.END_OF_STREAM, srcs);
+    }
+
+    private <V> ByteSource asComparableBytesNew(ValueAccessor<V> accessor, V data, ByteComparable.Version version)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        V[] bufs = split(accessor, data);
+        int lengthWithoutTrailingNulls = 0;
+        for (int i = 0; i < bufs.length; ++i)
+            if (bufs[i] != null)
+                lengthWithoutTrailingNulls = i + 1;
+
+        ByteSource[] srcs = new ByteSource[lengthWithoutTrailingNulls];
+        for (int i = 0; i < lengthWithoutTrailingNulls; ++i)
+            srcs[i] = bufs[i] != null ? types.get(i).asComparableBytes(accessor, bufs[i], version) : null;
+
+        // Because we stop early when there are trailing nulls, there needs to be an explicit terminator to make the
+        // type prefix-free.
+        return ByteSource.withTerminator(ByteSource.TERMINATOR, srcs);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        assert version == ByteComparable.Version.OSS42; // Reverse translation is not supported for the legacy version.
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        V[] componentBuffers = accessor.createArray(types.size());
+        for (int i = 0; i < types.size(); ++i)
+        {
+            if (comparableBytes.peek() == ByteSource.TERMINATOR)
+                break;  // the rest of the fields remain null
+            AbstractType<?> componentType = types.get(i);
+            ByteSource.Peekable component = ByteSourceInverse.nextComponentSource(comparableBytes);
+            if (component != null)
+                componentBuffers[i] = componentType.fromComparableBytes(accessor, component, version);
+            else
+                componentBuffers[i] = null;
+        }
+        // consume terminator
+        int terminator = comparableBytes.next();
+        assert terminator == ByteSource.TERMINATOR : String.format("Expected TERMINATOR (0x%2x) after %d components",
+                                                                   ByteSource.TERMINATOR,
+                                                                   types.size());
+        return buildValue(accessor, componentBuffers);
     }
 
     /**
      * Split a tuple value into its component values.
      */
-    public static ByteBuffer[] split(ByteBuffer value, int numberOfElements, TupleType type)
+    public <V> V[] split(ValueAccessor<V> accessor, V value)
     {
-        ByteBuffer[] components = new ByteBuffer[numberOfElements];
-        ByteBuffer input = value.duplicate();
+        return split(accessor, value, size(), this);
+    }
+
+    /**
+     * Split a tuple value into its component values.
+     */
+    public static <V> V[] split(ValueAccessor<V> accessor, V value, int numberOfElements, TupleType type)
+    {
+        V[] components = accessor.createArray(numberOfElements);
+        int length = accessor.size(value);
+        int position = 0;
         for (int i = 0; i < numberOfElements; i++)
         {
-            if (!input.hasRemaining())
+            if (position == length)
                 return Arrays.copyOfRange(components, 0, i);
 
-            int size = input.getInt();
-
-            if (input.remaining() < size)
+            if (position + 4 > length)
                 throw new MarshalException(String.format("Not enough bytes to read %dth component", i));
 
+            int size = accessor.getInt(value, position);
+            position += 4;
+
             // size < 0 means null value
-            components[i] = size < 0 ? null : ByteBufferUtil.readBytes(input, size);
+            if (size >= 0)
+            {
+                if (position + size > length)
+                    throw new MarshalException(String.format("Not enough bytes to read %dth component", i));
+
+                components[i] = accessor.slice(value, position, size);
+                position += size;
+            }
+            else
+                components[i] = null;
         }
 
         // error out if we got more values in the tuple/UDT than we expected
-        if (input.hasRemaining())
+        if (position < length)
         {
-            throw new InvalidRequestException(String.format(
-            "Expected %s %s for %s column, but got more",
-            numberOfElements, numberOfElements == 1 ? "value" : "values", type.asCQL3Type()));
+            throw new MarshalException(String.format("Expected %s %s for %s column, but got more",
+                                                     numberOfElements, numberOfElements == 1 ? "value" : "values",
+                                                     type.asCQL3Type()));
         }
 
         return components;
     }
 
-    public static <V> V buildValue(ValueAccessor<V> accessor, V[] components)
+    @SafeVarargs
+    public static <V> V buildValue(ValueAccessor<V> accessor, V... components)
     {
         int totalLength = 0;
         for (V component : components)
@@ -264,7 +354,7 @@
         return result;
     }
 
-    public static ByteBuffer buildValue(ByteBuffer[] components)
+    public static ByteBuffer buildValue(ByteBuffer... components)
     {
         return buildValue(ByteBufferAccessor.instance, components);
     }
diff --git a/src/java/org/apache/cassandra/db/marshal/UUIDType.java b/src/java/org/apache/cassandra/db/marshal/UUIDType.java
index 55ce59d..9ec8063 100644
--- a/src/java/org/apache/cassandra/db/marshal/UUIDType.java
+++ b/src/java/org/apache/cassandra/db/marshal/UUIDType.java
@@ -30,6 +30,9 @@
 import org.apache.cassandra.serializers.MarshalException;
 import org.apache.cassandra.serializers.UUIDSerializer;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.UUIDGen;
 
 /**
@@ -96,10 +99,72 @@
                 return c;
         }
 
+        // Amusingly (or not so much), although UUIDType freely takes time UUIDs (UUIDs with version 1), it compares
+        // them differently than TimeUUIDType. This is evident in the least significant bytes comparison (the code
+        // below for UUIDType), where UUIDType treats them as unsigned bytes, while TimeUUIDType compares the bytes
+        // signed. See CASSANDRA-8730 for details around this discrepancy.
         return UnsignedLongs.compare(accessorL.getLong(left, 8), accessorR.getLong(right, 8));
     }
 
     @Override
+    public <V> ByteSource asComparableBytes(ValueAccessor<V> accessor, V data, ByteComparable.Version v)
+    {
+        if (accessor.isEmpty(data))
+            return null;
+
+        long msb = accessor.getLong(data, 0);
+        long version = ((msb >>> 12) & 0xf);
+        ByteBuffer swizzled = ByteBuffer.allocate(16);
+
+        if (version == 1)
+            swizzled.putLong(0, TimeUUIDType.reorderTimestampBytes(msb));
+        else
+            swizzled.putLong(0, (version << 60) | ((msb >>> 4) & 0x0FFFFFFFFFFFF000L) | (msb & 0xFFFL));
+
+        swizzled.putLong(8, accessor.getLong(data, 8));
+
+        // fixed-length thus prefix-free
+        return ByteSource.fixedLength(swizzled);
+    }
+
+    @Override
+    public <V> V fromComparableBytes(ValueAccessor<V> accessor, ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+    {
+        // Optional-style encoding of empty values as null sources
+        if (comparableBytes == null)
+            return accessor.empty();
+
+        // The UUID bits are stored as an unsigned fixed-length 128-bit integer.
+        long hiBits = ByteSourceInverse.getUnsignedFixedLengthAsLong(comparableBytes, 8);
+        long loBits = ByteSourceInverse.getUnsignedFixedLengthAsLong(comparableBytes, 8);
+
+        long uuidVersion = hiBits >>> 60 & 0xF;
+        if (uuidVersion == 1)
+        {
+            // If the version bits are set to 1, this is a time-based UUID, and its high bits are significantly more
+            // shuffled than in other UUIDs. Revert the shuffle.
+            hiBits = TimeUUIDType.reorderBackTimestampBytes(hiBits);
+        }
+        else
+        {
+            // For non-time UUIDs, the only thing that's needed is to put the version bits back where they were originally.
+            hiBits = hiBits << 4 & 0xFFFFFFFFFFFF0000L
+                     | uuidVersion << 12
+                     | hiBits & 0x0000000000000FFFL;
+        }
+
+        return makeUuidBytes(accessor, hiBits, loBits);
+    }
+
+    static <V> V makeUuidBytes(ValueAccessor<V> accessor, long high, long low)
+    {
+        V buffer = accessor.allocate(16);
+        accessor.putLong(buffer, 0, high);
+        accessor.putLong(buffer, 8, low);
+        return buffer;
+    }
+
+    @Override
     public boolean isValueCompatibleWithInternal(AbstractType<?> otherType)
     {
         return otherType instanceof UUIDType || otherType instanceof TimeUUIDType;
diff --git a/src/java/org/apache/cassandra/db/marshal/UserType.java b/src/java/org/apache/cassandra/db/marshal/UserType.java
index 29afad9..24c05e2 100644
--- a/src/java/org/apache/cassandra/db/marshal/UserType.java
+++ b/src/java/org/apache/cassandra/db/marshal/UserType.java
@@ -258,7 +258,7 @@
     @Override
     public String toJSONString(ByteBuffer buffer, ProtocolVersion protocolVersion)
     {
-        ByteBuffer[] buffers = split(buffer);
+        ByteBuffer[] buffers = split(ByteBufferAccessor.instance, buffer);
         StringBuilder sb = new StringBuilder("{");
         for (int i = 0; i < types.size(); i++)
         {
diff --git a/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java b/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
index a51836e..d454c5e 100644
--- a/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
+++ b/src/java/org/apache/cassandra/db/marshal/ValueAccessor.java
@@ -68,6 +68,7 @@
         Cell<V> cell(ColumnMetadata column, long timestamp, int ttl, int localDeletionTime, V value, CellPath path);
         Clustering<V> clustering(V... values);
         Clustering<V> clustering();
+        Clustering<V> staticClustering();
         ClusteringBound<V> bound(ClusteringPrefix.Kind kind, V... values);
         ClusteringBound<V> bound(ClusteringPrefix.Kind kind);
         ClusteringBoundary<V> boundary(ClusteringPrefix.Kind kind, V... values);
@@ -105,7 +106,6 @@
         {
             return boundary(reversed ? INCL_END_EXCL_START_BOUNDARY : EXCL_END_INCL_START_BOUNDARY, boundValues);
         }
-
     }
     /**
      * @return the size of the given value
@@ -331,6 +331,12 @@
     Ballot toBallot(V value);
 
     /**
+     * writes the byte value {@param value} to {@param dst} at offset {@param offset}
+     * @return the number of bytes written to {@param value}
+     */
+    int putByte(V dst, int offset, byte value);
+
+    /**
      * writes the short value {@param value} to {@param dst} at offset {@param offset}
      * @return the number of bytes written to {@param value}
      */
diff --git a/src/java/org/apache/cassandra/db/rows/EncodingStats.java b/src/java/org/apache/cassandra/db/rows/EncodingStats.java
index 37dd34e..518285d 100644
--- a/src/java/org/apache/cassandra/db/rows/EncodingStats.java
+++ b/src/java/org/apache/cassandra/db/rows/EncodingStats.java
@@ -67,7 +67,7 @@
 
     // We should use this sparingly obviously
     public static final EncodingStats NO_STATS = new EncodingStats(TIMESTAMP_EPOCH, DELETION_TIME_EPOCH, TTL_EPOCH);
-    public static long HEAP_SIZE = ObjectSizes.measure(NO_STATS);
+    public static final long HEAP_SIZE = ObjectSizes.measure(NO_STATS);
 
     public static final Serializer serializer = new Serializer();
 
diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
index 48de8b5..b596397 100644
--- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
+++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReceiver.java
@@ -25,18 +25,16 @@
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
-
-import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
-import org.apache.cassandra.io.sstable.SSTable;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.rows.ThrottledUnfilteredIterator;
@@ -45,6 +43,7 @@
 import org.apache.cassandra.dht.Bounds;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
+import org.apache.cassandra.io.sstable.SSTable;
 import org.apache.cassandra.io.sstable.SSTableMultiWriter;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.streaming.IncomingStream;
@@ -172,23 +171,31 @@
         return cfs.metadata().params.cdc;
     }
 
+    // returns true iif it is a cdc table and cdc on repair is enabled.
+    private boolean cdcRequiresWriteCommitLog(ColumnFamilyStore cfs)
+    {
+        return DatabaseDescriptor.isCDCOnRepairEnabled() && hasCDC(cfs);
+    }
+
     /*
      * We have a special path for views and for CDC.
      *
      * For views, since the view requires cleaning up any pre-existing state, we must put all partitions
      * through the same write path as normal mutations. This also ensures any 2is are also updated.
      *
-     * For CDC-enabled tables, we want to ensure that the mutations are run through the CommitLog so they
-     * can be archived by the CDC process on discard.
+     * For CDC-enabled tables and write path for CDC is enabled, we want to ensure that the mutations are
+     * run through the CommitLog, so they can be archived by the CDC process on discard.
      */
     private boolean requiresWritePath(ColumnFamilyStore cfs)
     {
-        return hasCDC(cfs) || cfs.streamToMemtable() || (session.streamOperation().requiresViewBuild() && hasViews(cfs));
+        return cdcRequiresWriteCommitLog(cfs)
+               || cfs.streamToMemtable()
+               || (session.streamOperation().requiresViewBuild() && hasViews(cfs));
     }
 
     private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers)
     {
-        boolean hasCdc = hasCDC(cfs);
+        boolean writeCDCCommitLog = cdcRequiresWriteCommitLog(cfs);
         ColumnFilter filter = ColumnFilter.all(cfs.metadata());
         for (SSTableReader reader : readers)
         {
@@ -206,7 +213,7 @@
                     // If the CFS has CDC, however, these updates need to be written to the CommitLog
                     // so they get archived into the cdc_raw folder
                     ks.apply(new Mutation(PartitionUpdate.fromIterator(throttledPartitions.next(), filter)),
-                             hasCdc,
+                             writeCDCCommitLog,
                              true,
                              false);
                 }
diff --git a/src/java/org/apache/cassandra/db/virtual/QueriesTable.java b/src/java/org/apache/cassandra/db/virtual/QueriesTable.java
new file mode 100644
index 0000000..aeba61c
--- /dev/null
+++ b/src/java/org/apache/cassandra/db/virtual/QueriesTable.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.db.virtual;
+
+import org.apache.cassandra.concurrent.DebuggableTask;
+import org.apache.cassandra.concurrent.SharedExecutorPool;
+import org.apache.cassandra.db.marshal.LongType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.schema.TableMetadata;
+
+import static java.lang.Long.max;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.apache.cassandra.utils.MonotonicClock.Global.approxTime;
+
+/**
+ * Virtual table that lists currently running queries on the NTR (coordinator) and Read/Mutation (local) stages
+ *
+ * Example:
+ * <pre>
+ * cqlsh> SELECT * FROM system_views.queries;
+ *
+ *  thread_id                   | queued_micros |  running_micros | task
+ * ------------------------------+---------------+-----------------+--------------------------------------------------------------------------------
+ *  Native-Transport-Requests-7 |         72923 |            7611 |                      QUERY select * from system_views.queries; [pageSize = 100]
+ *              MutationStage-2 |         18249 |            2084 | Mutation(keyspace='distributed_test_keyspace', key='000000f8', modifications...
+ *                  ReadStage-2 |         72447 |           10121 |                                         SELECT * FROM keyspace.table LIMIT 5000
+ * </pre>
+ */    
+final class QueriesTable extends AbstractVirtualTable
+{
+    private static final String TABLE_NAME = "queries";
+    private static final String ID = "thread_id";
+    private static final String QUEUED = "queued_micros";
+    private static final String RUNNING = "running_micros";
+    private static final String DESC = "task";
+
+    QueriesTable(String keyspace)
+    {
+        super(TableMetadata.builder(keyspace, TABLE_NAME)
+                           .comment("Lists currently running queries")
+                           .kind(TableMetadata.Kind.VIRTUAL)
+                           .partitioner(new LocalPartitioner(UTF8Type.instance))
+                           // The thread name is unique since the id given to each SEPWorker is unique
+                           .addPartitionKeyColumn(ID, UTF8Type.instance)
+                           .addRegularColumn(QUEUED, LongType.instance)
+                           .addRegularColumn(RUNNING, LongType.instance)
+                           .addRegularColumn(DESC, UTF8Type.instance)
+                           .build());
+    }
+
+    /**
+     * Walks the {@link SharedExecutorPool} workers for any {@link DebuggableTask} instances and populates the table.
+     */
+    @Override
+    public DataSet data()
+    {
+        SimpleDataSet result = new SimpleDataSet(metadata());
+        
+        for (DebuggableTask.RunningDebuggableTask task : SharedExecutorPool.SHARED.runningTasks())
+        {
+            if (!task.hasTask()) continue;
+            
+            long creationTimeNanos = task.creationTimeNanos();
+            long startTimeNanos = task.startTimeNanos();
+            long now = approxTime.now();
+
+            long queuedMicros = NANOSECONDS.toMicros(max((startTimeNanos > 0 ? startTimeNanos : now) - creationTimeNanos, 0));
+            long runningMicros = startTimeNanos > 0 ? NANOSECONDS.toMicros(now - startTimeNanos) : 0;
+            
+            result.row(task.threadId())
+                  .column(QUEUED, queuedMicros)
+                  .column(RUNNING, runningMicros)
+                  .column(DESC, task.description());
+        }
+        
+        return result;
+    }
+}
diff --git a/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java b/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
index f13e61c..59a0aba 100644
--- a/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
+++ b/src/java/org/apache/cassandra/db/virtual/SystemViewsKeyspace.java
@@ -47,6 +47,7 @@
                     .add(new BatchMetricsTable(VIRTUAL_VIEWS))
                     .add(new StreamingVirtualTable(VIRTUAL_VIEWS))
                     .add(new GossipInfoTable(VIRTUAL_VIEWS))
+                    .add(new QueriesTable(VIRTUAL_VIEWS))
                     .addAll(LocalRepairTables.getAll(VIRTUAL_VIEWS))
                     .build());
     }
diff --git a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
index 3a5db52..2b0e2a2 100644
--- a/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/ByteOrderedPartitioner.java
@@ -26,6 +26,9 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Hex;
 import org.apache.cassandra.utils.ObjectSizes;
@@ -102,6 +105,12 @@
         }
 
         @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return ByteSource.of(token, version);
+        }
+
+        @Override
         public IPartitioner getPartitioner()
         {
             return instance;
@@ -222,6 +231,11 @@
 
     private final Token.TokenFactory tokenFactory = new Token.TokenFactory()
     {
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            return new BytesToken(ByteSourceInverse.getUnescapedBytes(comparableBytes));
+        }
+
         public ByteBuffer toByteArray(Token token)
         {
             BytesToken bytesToken = (BytesToken) token;
diff --git a/src/java/org/apache/cassandra/dht/LocalPartitioner.java b/src/java/org/apache/cassandra/dht/LocalPartitioner.java
index 09cd2b7..127c5b7 100644
--- a/src/java/org/apache/cassandra/dht/LocalPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/LocalPartitioner.java
@@ -26,7 +26,10 @@
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.CachedHashDecoratedKey;
 import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.memory.HeapCloner;
 
@@ -83,6 +86,12 @@
 
     private final Token.TokenFactory tokenFactory = new Token.TokenFactory()
     {
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            ByteBuffer tokenData = comparator.fromComparableBytes(ByteBufferAccessor.instance, comparableBytes, version);
+            return new LocalToken(tokenData);
+        }
+
         public ByteBuffer toByteArray(Token token)
         {
             return ((LocalToken)token).token;
@@ -175,6 +184,12 @@
         }
 
         @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return comparator.asComparableBytes(ByteBufferAccessor.instance, token, version);
+        }
+
+        @Override
         public IPartitioner getPartitioner()
         {
             return LocalPartitioner.this;
diff --git a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
index e2daac4..015610f 100644
--- a/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
+++ b/src/java/org/apache/cassandra/dht/Murmur3Partitioner.java
@@ -33,6 +33,9 @@
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.MurmurHash;
 import org.apache.cassandra.utils.ObjectSizes;
 
@@ -177,6 +180,12 @@
         }
 
         @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return ByteSource.of(token);
+        }
+
+        @Override
         public IPartitioner getPartitioner()
         {
             return instance;
@@ -326,6 +335,12 @@
 
     private final Token.TokenFactory tokenFactory = new Token.TokenFactory()
     {
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            long tokenData = ByteSourceInverse.getSignedLong(comparableBytes);
+            return new LongToken(tokenData);
+        }
+
         public ByteBuffer toByteArray(Token token)
         {
             LongToken longToken = (LongToken) token;
diff --git a/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java b/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
index 16c5db1..2d4def9 100644
--- a/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
@@ -33,6 +33,9 @@
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.apache.cassandra.utils.bytecomparable.ByteSourceInverse;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.ObjectSizes;
 import org.apache.cassandra.utils.Pair;
@@ -128,6 +131,11 @@
 
     private final Token.TokenFactory tokenFactory = new Token.TokenFactory()
     {
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            return new StringToken(ByteSourceInverse.getString(comparableBytes));
+        }
+
         public ByteBuffer toByteArray(Token token)
         {
             StringToken stringToken = (StringToken) token;
@@ -194,6 +202,12 @@
         {
             return EMPTY_SIZE + ObjectSizes.sizeOf(token);
         }
+
+        @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return ByteSource.of(token, version);
+        }
     }
 
     public StringToken getToken(ByteBuffer key)
diff --git a/src/java/org/apache/cassandra/dht/RandomPartitioner.java b/src/java/org/apache/cassandra/dht/RandomPartitioner.java
index 241b785..d02cfd5 100644
--- a/src/java/org/apache/cassandra/dht/RandomPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/RandomPartitioner.java
@@ -27,6 +27,8 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.cassandra.db.CachedHashDecoratedKey;
+import org.apache.cassandra.db.marshal.ByteArrayAccessor;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -34,6 +36,8 @@
 import org.apache.cassandra.db.marshal.PartitionerDefinedOrder;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.GuidGenerator;
 import org.apache.cassandra.utils.ObjectSizes;
@@ -158,6 +162,11 @@
 
     private final Token.TokenFactory tokenFactory = new Token.TokenFactory()
     {
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            return fromByteArray(IntegerType.instance.fromComparableBytes(ByteBufferAccessor.instance, comparableBytes, version));
+        }
+
         public ByteBuffer toByteArray(Token token)
         {
             BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
@@ -245,6 +254,12 @@
         }
 
         @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return IntegerType.instance.asComparableBytes(ByteArrayAccessor.instance, token.toByteArray(), version);
+        }
+
+        @Override
         public IPartitioner getPartitioner()
         {
             return instance;
diff --git a/src/java/org/apache/cassandra/dht/Token.java b/src/java/org/apache/cassandra/dht/Token.java
index d8e82f8..3543dab 100644
--- a/src/java/org/apache/cassandra/dht/Token.java
+++ b/src/java/org/apache/cassandra/dht/Token.java
@@ -26,6 +26,8 @@
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.util.DataOutputPlus;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public abstract class Token implements RingPosition<Token>, Serializable
 {
@@ -37,8 +39,31 @@
     {
         public abstract ByteBuffer toByteArray(Token token);
         public abstract Token fromByteArray(ByteBuffer bytes);
+
+        /**
+         * Produce a byte-comparable representation of the token.
+         * See {@link Token#asComparableBytes}
+         */
+        public ByteSource asComparableBytes(Token token, ByteComparable.Version version)
+        {
+            return token.asComparableBytes(version);
+        }
+
+        /**
+         * Translates the given byte-comparable representation to a token instance. If the given bytes don't correspond
+         * to the encoding of an instance of the expected token type, an {@link IllegalArgumentException} may be thrown.
+         *
+         * @param comparableBytes A byte-comparable representation (presumably of a token of some expected token type).
+         * @return A new {@link Token} instance, corresponding to the given byte-ordered representation. If we were
+         * to call {@link #asComparableBytes(ByteComparable.Version)} on the returned object, we should get a
+         * {@link ByteSource} equal to the input one as a result.
+         * @throws IllegalArgumentException if the bytes do not encode a valid token.
+         */
+        public abstract Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version);
+
         public abstract String toString(Token token); // serialize as string, not necessarily human-readable
         public abstract Token fromString(String string); // deserialize
+
         public abstract void validate(String token) throws ConfigurationException;
 
         public void serialize(Token token, DataOutputPlus out) throws IOException
@@ -100,6 +125,20 @@
     abstract public Object getTokenValue();
 
     /**
+     * Produce a weakly prefix-free byte-comparable representation of the token, i.e. such a sequence of bytes that any
+     * pair x, y of valid tokens of this type and any bytes b1, b2 between 0x10 and 0xEF,
+     * (+ stands for concatenation)
+     *   compare(x, y) == compareLexicographicallyUnsigned(asByteComparable(x)+b1, asByteComparable(y)+b2)
+     * (i.e. the values compare like the original type, and an added 0x10-0xEF byte at the end does not change that) and:
+     *   asByteComparable(x)+b1 is not a prefix of asByteComparable(y)      (weakly prefix free)
+     * (i.e. a valid representation of a value may be a prefix of another valid representation of a value only if the
+     * following byte in the latter is smaller than 0x10 or larger than 0xEF). These properties are trivially true if
+     * the encoding compares correctly and is prefix free, but also permits a little more freedom that enables somewhat
+     * more efficient encoding of arbitrary-length byte-comparable blobs.
+     */
+    abstract public ByteSource asComparableBytes(ByteComparable.Version version);
+
+    /**
      * Returns a measure for the token space covered between this token and next.
      * Used by the token allocation algorithm (see CASSANDRA-7032).
      */
@@ -128,7 +167,7 @@
 
     /*
      * A token corresponds to the range of all the keys having this token.
-     * A token is thus no comparable directly to a key. But to be able to select
+     * A token is thus not comparable directly to a key. But to be able to select
      * keys given tokens, we introduce two "fake" keys for each token T:
      *   - lowerBoundKey: a "fake" key representing the lower bound T represents.
      *                    In other words, lowerBoundKey is the smallest key that
@@ -190,6 +229,20 @@
                 return ((pos instanceof KeyBound) && !((KeyBound)pos).isMinimumBound) ? 0 : 1;
         }
 
+        @Override
+        public ByteSource asComparableBytes(Version version)
+        {
+            int terminator = isMinimumBound ? ByteSource.LT_NEXT_COMPONENT : ByteSource.GT_NEXT_COMPONENT;
+            return ByteSource.withTerminator(terminator, token.asComparableBytes(version));
+        }
+
+        @Override
+        public ByteComparable asComparableBound(boolean before)
+        {
+            // This class is already a bound thus nothing needs to be changed from its representation
+            return this;
+        }
+
         public IPartitioner getPartitioner()
         {
             return getToken().getPartitioner();
diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java
index 4a46ca3..e06d073 100644
--- a/src/java/org/apache/cassandra/gms/Gossiper.java
+++ b/src/java/org/apache/cassandra/gms/Gossiper.java
@@ -85,6 +85,7 @@
 import static org.apache.cassandra.utils.FBUtilities.getBroadcastAddressAndPort;
 import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis;
 import static org.apache.cassandra.utils.Clock.Global.nanoTime;
+import static org.apache.cassandra.gms.VersionedValue.BOOTSTRAPPING_STATUS;
 
 /**
  * This module is responsible for Gossiping information for the local endpoint. This abstraction
@@ -1500,11 +1501,54 @@
         return pieces[0];
     }
 
+    /**
+     * Gossip offers no happens-before relationship, but downstream subscribers assume a happens-before relationship
+     * before being notified!  To attempt to be nicer to subscribers, this {@link Comparator} attempts to order EndpointState
+     * within a map based on a few heuristics:
+     * <ol>
+     *     <li>STATUS - some STATUS depends on other instance STATUS, so make sure they are last; eg. BOOT, and BOOT_REPLACE</li>
+     *     <li>generation - normally defined as system clock millis, this can be skewed and is a best effort</li>
+     *     <li>address - tie breaker to make sure order is consistent</li>
+     * </ol>
+     * <p>
+     * Problems:
+     * Generation is normally defined as system clock millis, which can be skewed and inconsistent across nodes
+     * (generations do not have a happens-before relationship, so ordering is sketchy at best).
+     * <p>
+     * Motivations:
+     * {@link Map#entrySet()} returns data in effectively random order, so can get into a situation such as the following example.
+     * {@code
+     * 3 node cluster: n1, n2, and n3
+     * n2 goes down and n4 does host replacement and fails before completion
+     * n5 tries to do a host replacement against n4 (ignore the fact this doesn't make sense)
+     * }
+     * In the case above, the {@link Map#entrySet()} ordering can be random, causing n4 to apply before n2, which will
+     * be rejected by subscribers (only after updating gossip causing zero retries).
+     */
+    private static Comparator<Entry<InetAddressAndPort, EndpointState>> STATE_MAP_ORDERING =
+    ((Comparator<Entry<InetAddressAndPort, EndpointState>>) (e1, e2) -> {
+        // check status first, make sure bootstrap status happens-after all others
+        if (BOOTSTRAPPING_STATUS.contains(getGossipStatus(e1.getValue())))
+            return 1;
+        if (BOOTSTRAPPING_STATUS.contains(getGossipStatus(e2.getValue())))
+            return -1;
+        return 0;
+    })
+    .thenComparingInt((Entry<InetAddressAndPort, EndpointState> e) -> e.getValue().getHeartBeatState().getGeneration())
+    .thenComparing(Entry::getKey);
+
+    private static Iterable<Entry<InetAddressAndPort, EndpointState>> order(Map<InetAddressAndPort, EndpointState> epStateMap)
+    {
+        List<Entry<InetAddressAndPort, EndpointState>> list = new ArrayList<>(epStateMap.entrySet());
+        Collections.sort(list, STATE_MAP_ORDERING);
+        return list;
+    }
+
     @VisibleForTesting
     public void applyStateLocally(Map<InetAddressAndPort, EndpointState> epStateMap)
     {
         checkProperThreadForStateMutation();
-        for (Entry<InetAddressAndPort, EndpointState> entry : epStateMap.entrySet())
+        for (Entry<InetAddressAndPort, EndpointState> entry : order(epStateMap))
         {
             InetAddressAndPort ep = entry.getKey();
             if (ep.equals(getBroadcastAddressAndPort()) && !isInShadowRound())
diff --git a/src/java/org/apache/cassandra/gms/VersionedValue.java b/src/java/org/apache/cassandra/gms/VersionedValue.java
index 26644e1..519fffa 100644
--- a/src/java/org/apache/cassandra/gms/VersionedValue.java
+++ b/src/java/org/apache/cassandra/gms/VersionedValue.java
@@ -27,6 +27,7 @@
 import static java.nio.charset.StandardCharsets.ISO_8859_1;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 
 import org.apache.cassandra.db.TypeSizes;
@@ -84,6 +85,8 @@
     // values for ApplicationState.REMOVAL_COORDINATOR
     public final static String REMOVAL_COORDINATOR = "REMOVER";
 
+    public static Set<String> BOOTSTRAPPING_STATUS = ImmutableSet.of(STATUS_BOOTSTRAPPING, STATUS_BOOTSTRAPPING_REPLACE);
+
     public final int version;
     public final String value;
 
diff --git a/src/java/org/apache/cassandra/io/sstable/Descriptor.java b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
index 83bafd4..589e46b 100644
--- a/src/java/org/apache/cassandra/io/sstable/Descriptor.java
+++ b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
@@ -218,6 +218,7 @@
 
     /**
      * Parse a sstable filename, extracting both the {@code Descriptor} and {@code Component} part.
+     * The keyspace/table name will be extracted from the directory path.
      *
      * @param file the {@code File} object for the filename to parse.
      * @return a pair of the descriptor and component corresponding to the provided {@code file}.
@@ -233,6 +234,58 @@
         if (!file.isAbsolute())
             file = file.toAbsolute();
 
+        SSTableInfo info = validateAndExtractInfo(file);
+        String name = file.name();
+
+        File directory = parentOf(name, file);
+        File tableDir = directory;
+
+        // Check if it's a 2ndary index directory (note that it doesn't exclude it from also being a backup or snapshot)
+        String indexName = "";
+        if (tableDir.name().startsWith(Directories.SECONDARY_INDEX_NAME_SEPARATOR))
+        {
+            indexName = tableDir.name();
+            tableDir = parentOf(name, tableDir);
+        }
+
+        // Then it can be a backup or a snapshot
+        if (tableDir.name().equals(Directories.BACKUPS_SUBDIR))
+            tableDir = tableDir.parent();
+        else if (parentOf(name, tableDir).name().equals(Directories.SNAPSHOT_SUBDIR))
+            tableDir = parentOf(name, parentOf(name, tableDir));
+
+        String table = tableDir.name().split("-")[0] + indexName;
+        String keyspace = parentOf(name, tableDir).name();
+
+        return Pair.create(new Descriptor(info.version, directory, keyspace, table, info.id, info.format), info.component);
+    }
+
+    /**
+     * Parse a sstable filename, extracting both the {@code Descriptor} and {@code Component} part.
+     *
+     * @param file     the {@code File} object for the filename to parse.
+     * @param keyspace The keyspace name of the file. If <code>null</code>, then the keyspace name will be extracted
+     *                 from the directory path.
+     * @param table    The table name of the file. If <code>null</code>, then the table name will be extracted from the
+     *                 directory path.
+     * @return a pair of the descriptor and component corresponding to the provided {@code file}.
+     * @throws IllegalArgumentException if the provided {@code file} does point to a valid sstable filename. This could
+     *                                  mean either that the filename doesn't look like a sstable file, or that it is for an old and unsupported
+     *                                  versions.
+     */
+    public static Pair<Descriptor, Component> fromFilenameWithComponent(File file, String keyspace, String table)
+    {
+        if (null == keyspace || null == table)
+        {
+            return fromFilenameWithComponent(file);
+        }
+
+        SSTableInfo info = validateAndExtractInfo(file);
+        return Pair.create(new Descriptor(info.version, parentOf(file.name(), file), keyspace, table, info.id, info.format), info.component);
+    }
+
+    private static SSTableInfo validateAndExtractInfo(File file)
+    {
         String name = file.name();
         List<String> tokens = filenameSplitter.splitToList(name);
         int size = tokens.size();
@@ -245,9 +298,7 @@
             // Note that we assume it's an old format sstable if it has the right number of tokens: this is not perfect
             // but we're just trying to be helpful, not perfect.
             if (size == 5 || size == 6)
-                throw new IllegalArgumentException(String.format("%s is of version %s which is now unsupported and cannot be read.",
-                                                                 name,
-                                                                 tokens.get(size - 3)));
+                throw new IllegalArgumentException(String.format("%s is of version %s which is now unsupported and cannot be read.", name, tokens.get(size - 3)));
             throw new IllegalArgumentException(String.format("Invalid sstable file %s: the name doesn't look like a supported sstable file name", name));
         }
 
@@ -282,27 +333,23 @@
         if (!version.isCompatible())
             throw invalidSSTable(name, "incompatible sstable version (%s); you should have run upgradesstables before upgrading", versionString);
 
-        File directory = parentOf(name, file);
-        File tableDir = directory;
+        return new SSTableInfo(version, id, format, component);
+    }
 
-        // Check if it's a 2ndary index directory (not that it doesn't exclude it to be also a backup or snapshot)
-        String indexName = "";
-        if (Directories.isSecondaryIndexFolder(tableDir))
+    private static class SSTableInfo
+    {
+        final Version version;
+        final SSTableId id;
+        final SSTableFormat.Type format;
+        final Component component;
+
+        SSTableInfo(Version version, SSTableId id, SSTableFormat.Type format, Component component)
         {
-            indexName = tableDir.name();
-            tableDir = parentOf(name, tableDir);
+            this.version = version;
+            this.id = id;
+            this.format = format;
+            this.component = component;
         }
-
-        // Then it can be a backup or a snapshot
-        if (tableDir.name().equals(Directories.BACKUPS_SUBDIR))
-            tableDir = tableDir.parent();
-        else if (parentOf(name, tableDir).name().equals(Directories.SNAPSHOT_SUBDIR))
-            tableDir = parentOf(name, parentOf(name, tableDir));
-
-        String table = tableDir.name().split("-")[0] + indexName;
-        String keyspace = parentOf(name, tableDir).name();
-
-        return Pair.create(new Descriptor(version, directory, keyspace, table, id, format), component);
     }
 
     private static File parentOf(String name, File file)
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index 81030c2..5194abb 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -217,6 +217,28 @@
     }
 
     /**
+     * Parse a sstable filename into both a {@link Descriptor} and {@code Component} object.
+     *
+     * @param file     the filename to parse.
+     * @param keyspace The keyspace name of the file.
+     * @param table    The table name of the file.
+     * @return a pair of the {@code Descriptor} and {@code Component} corresponding to {@code file} if it corresponds to
+     * a valid and supported sstable filename, {@code null} otherwise. Note that components of an unknown type will be
+     * returned as CUSTOM ones.
+     */
+    public static Pair<Descriptor, Component> tryComponentFromFilename(File file, String keyspace, String table)
+    {
+        try
+        {
+            return Descriptor.fromFilenameWithComponent(file, keyspace, table);
+        }
+        catch (Throwable e)
+        {
+            return null;
+        }
+    }
+
+    /**
      * Parse a sstable filename into a {@link Descriptor} object.
      * <p>
      * Note that this method ignores the component part of the filename; if this is not what you want, use
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
index 3d9e0f4..71bd025 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
@@ -48,6 +48,7 @@
 {
     private final File directory;
     private final String keyspace;
+    private final String table;
     private final Client client;
     private final int connectionsPerHost;
     private final OutputHandler outputHandler;
@@ -63,8 +64,14 @@
 
     public SSTableLoader(File directory, Client client, OutputHandler outputHandler, int connectionsPerHost, String targetKeyspace)
     {
+        this(directory, client, outputHandler, connectionsPerHost, targetKeyspace, null);
+    }
+
+    public SSTableLoader(File directory, Client client, OutputHandler outputHandler, int connectionsPerHost, String targetKeyspace, String targetTable)
+    {
         this.directory = directory;
         this.keyspace = targetKeyspace != null ? targetKeyspace : directory.parent().name();
+        this.table = targetTable;
         this.client = client;
         this.outputHandler = outputHandler;
         this.connectionsPerHost = connectionsPerHost;
@@ -87,7 +94,16 @@
                                               return false;
                                           }
 
-                                          Pair<Descriptor, Component> p = SSTable.tryComponentFromFilename(file);
+                                          Pair<Descriptor, Component> p;
+                                          if (null != keyspace && null != table)
+                                          {
+                                              p = SSTable.tryComponentFromFilename(file, keyspace, table);
+                                          }
+                                          else
+                                          {
+                                              p = SSTable.tryComponentFromFilename(file);
+                                          }
+
                                           Descriptor desc = p == null ? null : p.left;
                                           if (p == null || !p.right.equals(Component.DATA))
                                               return false;
diff --git a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
index 1d39bbe..9d1989e 100644
--- a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
+++ b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
@@ -350,6 +350,7 @@
                 String dc = e.getKey();
                 ReplicationFactor rf = getReplicationFactor(dc);
                 Guardrails.minimumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
+                Guardrails.maximumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
                 int nodeCount = dcsNodes.get(dc).size();
                 // nodeCount==0 on many tests
                 if (rf.fullReplicas > nodeCount && nodeCount != 0)
diff --git a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
index b950ec3..4ff726c 100644
--- a/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
+++ b/src/java/org/apache/cassandra/locator/ReconnectableSnitchHelper.java
@@ -30,6 +30,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.cassandra.auth.IInternodeAuthenticator.InternodeConnectionDirection.OUTBOUND_PRECONNECT;
+
 /**
  * Sidekick helper for snitches that want to reconnect from one IP addr for a node to another.
  * Typically, this is for situations like EC2 where a node will have a public address and a private address,
@@ -64,7 +66,8 @@
     @VisibleForTesting
     static void reconnect(InetAddressAndPort publicAddress, InetAddressAndPort localAddress, IEndpointSnitch snitch, String localDc)
     {
-        if (!new OutboundConnectionSettings(publicAddress, localAddress).withDefaults(ConnectionCategory.MESSAGING).authenticate())
+        final OutboundConnectionSettings settings = new OutboundConnectionSettings(publicAddress, localAddress).withDefaults(ConnectionCategory.MESSAGING);
+        if (!settings.authenticator().authenticate(settings.to.getAddress(), settings.to.getPort(), null, OUTBOUND_PRECONNECT))
         {
             logger.debug("InternodeAuthenticator said don't reconnect to {} on {}", publicAddress, localAddress);
             return;
diff --git a/src/java/org/apache/cassandra/locator/SimpleStrategy.java b/src/java/org/apache/cassandra/locator/SimpleStrategy.java
index e5b9210..488b601 100644
--- a/src/java/org/apache/cassandra/locator/SimpleStrategy.java
+++ b/src/java/org/apache/cassandra/locator/SimpleStrategy.java
@@ -109,6 +109,7 @@
             int nodeCount = StorageService.instance.getHostIdToEndpoint().size();
             // nodeCount==0 on many tests
             Guardrails.minimumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
+            Guardrails.maximumReplicationFactor.guard(rf.fullReplicas, keyspaceName, false, state);
             if (rf.fullReplicas > nodeCount && nodeCount != 0)
             {
                 String msg = "Your replication factor " + rf.fullReplicas
diff --git a/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java b/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
index c5ed064..f3dc28a 100644
--- a/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
+++ b/src/java/org/apache/cassandra/net/InboundConnectionInitiator.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
+import java.security.cert.Certificate;
 import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.concurrent.Future;
@@ -46,6 +47,7 @@
 import io.netty.handler.logging.LoggingHandler;
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.SslHandler;
+import org.apache.cassandra.auth.IInternodeAuthenticator;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -60,7 +62,11 @@
 
 import static java.lang.Math.*;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.apache.cassandra.auth.IInternodeAuthenticator.InternodeConnectionDirection.INBOUND;
 import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory;
+import static org.apache.cassandra.net.InternodeConnectionUtils.DISCARD_HANDLER_NAME;
+import static org.apache.cassandra.net.InternodeConnectionUtils.SSL_HANDLER_NAME;
+import static org.apache.cassandra.net.InternodeConnectionUtils.certificates;
 import static org.apache.cassandra.net.MessagingService.*;
 import static org.apache.cassandra.net.SocketFactory.WIRETRACE;
 import static org.apache.cassandra.net.SocketFactory.newSslHandler;
@@ -102,7 +108,7 @@
 
             pipelineInjector.accept(pipeline);
 
-            // order of handlers: ssl -> logger -> handshakeHandler
+            // order of handlers: ssl -> client-authentication -> logger -> handshakeHandler
             // For either unencrypted or transitional modes, allow Ssl optionally.
             switch(settings.encryption.tlsEncryptionPolicy())
             {
@@ -111,14 +117,17 @@
                     pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "rejectssl", new RejectSslHandler());
                     break;
                 case OPTIONAL:
-                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "ssl", new OptionalSslHandler(settings.encryption));
+                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, SSL_HANDLER_NAME, new OptionalSslHandler(settings.encryption));
                     break;
                 case ENCRYPTED:
                     SslHandler sslHandler = getSslHandler("creating", channel, settings.encryption);
-                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, "ssl", sslHandler);
+                    pipeline.addAfter(PIPELINE_INTERNODE_ERROR_EXCLUSIONS, SSL_HANDLER_NAME, sslHandler);
                     break;
             }
 
+            // Pipeline for performing client authentication
+            pipeline.addLast("client-authentication", new ClientAuthenticationHandler(settings.authenticator));
+
             if (WIRETRACE)
                 pipeline.addLast("logger", new LoggingHandler(LogLevel.INFO));
 
@@ -199,6 +208,61 @@
     }
 
     /**
+     * Handler to perform authentication for internode inbound connections.
+     * This handler is called even before messaging handshake starts.
+     */
+    private static class ClientAuthenticationHandler extends ByteToMessageDecoder
+    {
+        private final IInternodeAuthenticator authenticator;
+
+        public ClientAuthenticationHandler(IInternodeAuthenticator authenticator)
+        {
+            this.authenticator = authenticator;
+        }
+
+        @Override
+        protected void decode(ChannelHandlerContext channelHandlerContext, ByteBuf byteBuf, List<Object> list) throws Exception
+        {
+            // Extract certificates from SSL handler(handler with name "ssl").
+            final Certificate[] certificates = certificates(channelHandlerContext.channel());
+            if (!authenticate(channelHandlerContext.channel().remoteAddress(), certificates))
+            {
+                logger.error("Unable to authenticate peer {} for internode authentication", channelHandlerContext.channel());
+
+                // To release all the pending buffered data, replace authentication handler with discard handler.
+                // This avoids pending inbound data to be fired through the pipeline
+                channelHandlerContext.pipeline().replace(this, DISCARD_HANDLER_NAME, new InternodeConnectionUtils.ByteBufDiscardHandler());
+                channelHandlerContext.pipeline().close();
+            }
+            else
+            {
+                channelHandlerContext.pipeline().remove(this);
+            }
+        }
+
+        private boolean authenticate(SocketAddress socketAddress, final Certificate[] certificates) throws IOException
+        {
+            if (socketAddress.getClass().getSimpleName().equals("EmbeddedSocketAddress"))
+                return true;
+
+            if (!(socketAddress instanceof InetSocketAddress))
+                throw new IOException(String.format("Unexpected SocketAddress type: %s, %s", socketAddress.getClass(), socketAddress));
+
+            InetSocketAddress addr = (InetSocketAddress) socketAddress;
+            if (!authenticator.authenticate(addr.getAddress(), addr.getPort(), certificates, INBOUND))
+            {
+                // Log at info level as anything that can reach the inbound port could hit this
+                // and trigger a log of noise.  Failed outbound connections to known cluster endpoints
+                // still fail with an ERROR message and exception to alert operators that aren't watching logs closely.
+                logger.info("Authenticate rejected inbound internode connection from {}", addr);
+                return false;
+            }
+            return true;
+        }
+
+    }
+
+    /**
      * 'Server-side' component that negotiates the internode handshake when establishing a new connection.
      * This handler will be the first in the netty channel for each incoming connection (secure socket (TLS) notwithstanding),
      * and once the handshake is successful, it will configure the proper handlers ({@link InboundMessageHandler}
@@ -223,8 +287,7 @@
         }
 
         /**
-         * On registration, immediately schedule a timeout to kill this connection if it does not handshake promptly,
-         * and authenticate the remote address.
+         * On registration, immediately schedule a timeout to kill this connection if it does not handshake promptly.
          */
         public void handlerAdded(ChannelHandlerContext ctx) throws Exception
         {
@@ -232,31 +295,6 @@
                 logger.error("Timeout handshaking with {} (on {})", SocketFactory.addressId(initiate.from, (InetSocketAddress) ctx.channel().remoteAddress()), settings.bindAddress);
                 failHandshake(ctx);
             }, HandshakeProtocol.TIMEOUT_MILLIS, MILLISECONDS);
-
-            if (!authenticate(ctx.channel().remoteAddress()))
-            {
-                failHandshake(ctx);
-            }
-        }
-
-        private boolean authenticate(SocketAddress socketAddress) throws IOException
-        {
-            if (socketAddress.getClass().getSimpleName().equals("EmbeddedSocketAddress"))
-                return true;
-
-            if (!(socketAddress instanceof InetSocketAddress))
-                throw new IOException(String.format("Unexpected SocketAddress type: %s, %s", socketAddress.getClass(), socketAddress));
-
-            InetSocketAddress addr = (InetSocketAddress)socketAddress;
-            if (!settings.authenticate(addr.getAddress(), addr.getPort()))
-            {
-                // Log at info level as anything that can reach the inbound port could hit this
-                // and trigger a log of noise.  Failed outbound connections to known cluster endpoints
-                // still fail with an ERROR message and exception to alert operators that aren't watching logs closely.
-                logger.info("Authenticate rejected inbound internode connection from {}", addr);
-                return false;
-            }
-            return true;
         }
 
         @Override
@@ -562,7 +600,7 @@
             {
                 // Connection uses SSL/TLS, replace the detection handler with a SslHandler and so use encryption.
                 SslHandler sslHandler = getSslHandler("replacing optional", ctx.channel(), encryptionOptions);
-                ctx.pipeline().replace(this, "ssl", sslHandler);
+                ctx.pipeline().replace(this, SSL_HANDLER_NAME, sslHandler);
             }
             else
             {
diff --git a/src/java/org/apache/cassandra/net/InboundConnectionSettings.java b/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
index 2eab9bc..44c2c49 100644
--- a/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
+++ b/src/java/org/apache/cassandra/net/InboundConnectionSettings.java
@@ -71,16 +71,6 @@
         this(null, null, null, null, null, null, null, null, null);
     }
 
-    public boolean authenticate(InetAddressAndPort endpoint)
-    {
-        return authenticator.authenticate(endpoint.getAddress(), endpoint.getPort());
-    }
-
-    public boolean authenticate(InetAddress address, int port)
-    {
-        return authenticator.authenticate(address, port);
-    }
-
     public String toString()
     {
         return format("address: (%s), nic: %s, encryption: %s",
diff --git a/src/java/org/apache/cassandra/net/InternodeConnectionUtils.java b/src/java/org/apache/cassandra/net/InternodeConnectionUtils.java
new file mode 100644
index 0000000..39a0879
--- /dev/null
+++ b/src/java/org/apache/cassandra/net/InternodeConnectionUtils.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.net;
+
+import java.security.cert.Certificate;
+import javax.net.ssl.SSLPeerUnverifiedException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.ssl.SslHandler;
+
+/**
+ * Class that contains certificate utility methods.
+ */
+class InternodeConnectionUtils
+{
+    public static String SSL_HANDLER_NAME = "ssl";
+    public static String DISCARD_HANDLER_NAME = "discard";
+    private static final Logger logger = LoggerFactory.getLogger(InternodeConnectionUtils.class);
+
+    public static Certificate[] certificates(Channel channel)
+    {
+        final SslHandler sslHandler = (SslHandler) channel.pipeline().get(SSL_HANDLER_NAME);
+        Certificate[] certificates = null;
+        if (sslHandler != null)
+        {
+            try
+            {
+                certificates = sslHandler.engine()
+                                         .getSession()
+                                         .getPeerCertificates();
+            }
+            catch (SSLPeerUnverifiedException e)
+            {
+                logger.debug("Failed to get peer certificates for peer {}", channel.remoteAddress(), e);
+            }
+        }
+        return certificates;
+    }
+
+    /**
+     * Discard handler releases the received data silently. when internode authentication fails, the channel is closed,
+     * but the pending buffered data may still be fired through the pipeline. To avoid that, authentication handler is
+     * replaced with this DiscardHandler to release all the buffered data, to avoid handling unauthenticated data in the
+     * following handlers.
+     */
+    public static class ByteBufDiscardHandler extends ChannelInboundHandlerAdapter
+    {
+        @Override
+        public void channelRead(ChannelHandlerContext ctx, Object msg)
+        {
+            if (msg instanceof ByteBuf)
+            {
+                ((ByteBuf) msg).release();
+            }
+            else
+            {
+                ctx.fireChannelRead(msg);
+            }
+        }
+    }
+}
diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java
index ea019fd..d968a0c 100644
--- a/src/java/org/apache/cassandra/net/MessagingService.java
+++ b/src/java/org/apache/cassandra/net/MessagingService.java
@@ -475,7 +475,10 @@
     {
         OutboundConnections pool = channelManagers.get(to);
         if (pool != null)
+        {
             pool.interrupt();
+            logger.info("Interrupted outbound connections to {}", to);
+        }
     }
 
     /**
diff --git a/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java b/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
index a187068..7e38dd8 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnectionInitiator.java
@@ -21,13 +21,16 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedChannelException;
+import java.security.cert.Certificate;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import io.netty.util.concurrent.Future; //checkstyle: permit this import
 import io.netty.util.concurrent.Promise; //checkstyle: permit this import
 import org.apache.cassandra.utils.concurrent.AsyncPromise;
-import org.apache.cassandra.utils.concurrent.ImmediateFuture;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,9 +59,15 @@
 import org.apache.cassandra.security.ISslContextFactory;
 import org.apache.cassandra.security.SSLFactory;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.concurrent.ImmediateFuture;
 import org.apache.cassandra.utils.memory.BufferPools;
 
 import static java.util.concurrent.TimeUnit.*;
+import static org.apache.cassandra.auth.IInternodeAuthenticator.InternodeConnectionDirection.OUTBOUND;
+import static org.apache.cassandra.auth.IInternodeAuthenticator.InternodeConnectionDirection.OUTBOUND_PRECONNECT;
+import static org.apache.cassandra.net.InternodeConnectionUtils.DISCARD_HANDLER_NAME;
+import static org.apache.cassandra.net.InternodeConnectionUtils.SSL_HANDLER_NAME;
+import static org.apache.cassandra.net.InternodeConnectionUtils.certificates;
 import static org.apache.cassandra.net.MessagingService.VERSION_40;
 import static org.apache.cassandra.net.HandshakeProtocol.*;
 import static org.apache.cassandra.net.ConnectionType.STREAMING;
@@ -130,13 +139,14 @@
         if (logger.isTraceEnabled())
             logger.trace("creating outbound bootstrap to {}, requestVersion: {}", settings, requestMessagingVersion);
 
-        if (!settings.authenticate())
+        if (!settings.authenticator.authenticate(settings.to.getAddress(), settings.to.getPort(), null, OUTBOUND_PRECONNECT))
         {
             // interrupt other connections, so they must attempt to re-authenticate
             MessagingService.instance().interruptOutbound(settings.to);
             return ImmediateFuture.failure(new IOException("authentication failed to " + settings.connectToId()));
         }
 
+
         // this is a bit ugly, but is the easiest way to ensure that if we timeout we can propagate a suitable error message
         // and still guarantee that, if on timing out we raced with success, the successfully created channel is handled
         AtomicBoolean timedout = new AtomicBoolean();
@@ -198,7 +208,7 @@
         {
             ChannelPipeline pipeline = channel.pipeline();
 
-            // order of handlers: ssl -> logger -> handshakeHandler
+            // order of handlers: ssl -> server-authentication -> logger -> handshakeHandler
             if (settings.withEncryption())
             {
                 // check if we should actually encrypt this connection
@@ -209,8 +219,9 @@
                 InetSocketAddress peer = settings.encryption.require_endpoint_verification ? new InetSocketAddress(address.getAddress(), address.getPort()) : null;
                 SslHandler sslHandler = newSslHandler(channel, sslContext, peer);
                 logger.trace("creating outbound netty SslContext: context={}, engine={}", sslContext.getClass().getName(), sslHandler.engine().getClass().getName());
-                pipeline.addFirst("ssl", sslHandler);
+                pipeline.addFirst(SSL_HANDLER_NAME, sslHandler);
             }
+            pipeline.addLast("server-authentication", new ServerAuthenticationHandler(settings));
 
             if (WIRETRACE)
                 pipeline.addLast("logger", new LoggingHandler(LogLevel.INFO));
@@ -220,6 +231,45 @@
 
     }
 
+    /**
+     * Authenticates the server before an outbound connection is established. If a connection is SSL based connection
+     * Server's identity is verified during ssl handshake using root certificate in truststore. One may choose to ignore
+     * outbound authentication or perform required authentication for outbound connections in the implementation
+     * of IInternodeAuthenticator interface.
+     */
+    @VisibleForTesting
+    static class ServerAuthenticationHandler extends ByteToMessageDecoder
+    {
+        final OutboundConnectionSettings settings;
+
+        ServerAuthenticationHandler(OutboundConnectionSettings settings)
+        {
+            this.settings = settings;
+        }
+
+        @Override
+        protected void decode(ChannelHandlerContext channelHandlerContext, ByteBuf byteBuf, List<Object> list) throws Exception
+        {
+            // Extract certificates from SSL handler(handler with name "ssl").
+            final Certificate[] certificates = certificates(channelHandlerContext.channel());
+            if (!settings.authenticator.authenticate(settings.to.getAddress(), settings.to.getPort(), certificates, OUTBOUND))
+            {
+                // interrupt other connections, so they must attempt to re-authenticate
+                MessagingService.instance().interruptOutbound(settings.to);
+                logger.error("authentication failed to " + settings.connectToId());
+
+                // To release all the pending buffered data, replace authentication handler with discard handler.
+                // This avoids pending inbound data to be fired through the pipeline
+                channelHandlerContext.pipeline().replace(this, DISCARD_HANDLER_NAME, new InternodeConnectionUtils.ByteBufDiscardHandler());
+                channelHandlerContext.pipeline().close();
+            }
+            else
+            {
+                channelHandlerContext.pipeline().remove(this);
+            }
+        }
+    }
+
     private class Handler extends ByteToMessageDecoder
     {
         /**
diff --git a/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java b/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
index 599e717..bcb6064 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnectionSettings.java
@@ -25,7 +25,6 @@
 import org.apache.cassandra.auth.IInternodeAuthenticator;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.locator.IEndpointSnitch;
@@ -82,7 +81,7 @@
     public final IInternodeAuthenticator authenticator;
     public final InetAddressAndPort to;
     public final InetAddressAndPort connectTo; // may be represented by a different IP address on this node's local network
-    public final EncryptionOptions encryption;
+    public final ServerEncryptionOptions encryption;
     public final Framing framing;
     public final Integer socketSendBufferSizeInBytes;
     public final Integer applicationSendQueueCapacityInBytes;
@@ -112,7 +111,7 @@
     private OutboundConnectionSettings(IInternodeAuthenticator authenticator,
                                        InetAddressAndPort to,
                                        InetAddressAndPort connectTo,
-                                       EncryptionOptions encryption,
+                                       ServerEncryptionOptions encryption,
                                        Framing framing,
                                        Integer socketSendBufferSizeInBytes,
                                        Integer applicationSendQueueCapacityInBytes,
@@ -157,11 +156,6 @@
         this.endpointToVersion = endpointToVersion;
     }
 
-    public boolean authenticate()
-    {
-        return authenticator.authenticate(to.getAddress(), to.getPort());
-    }
-
     public boolean withEncryption()
     {
         return encryption != null;
@@ -365,7 +359,7 @@
         return debug != null ? debug : OutboundDebugCallbacks.NONE;
     }
 
-    public EncryptionOptions encryption()
+    public ServerEncryptionOptions encryption()
     {
         return encryption != null ? encryption : defaultEncryptionOptions(to);
     }
@@ -499,7 +493,7 @@
     }
 
     @VisibleForTesting
-    static EncryptionOptions defaultEncryptionOptions(InetAddressAndPort endpoint)
+    static ServerEncryptionOptions defaultEncryptionOptions(InetAddressAndPort endpoint)
     {
         ServerEncryptionOptions options = DatabaseDescriptor.getInternodeMessagingEncyptionOptions();
         return options.shouldEncrypt(endpoint) ? options : null;
diff --git a/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java b/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
index c2ef851..e4f868f 100644
--- a/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
+++ b/src/java/org/apache/cassandra/security/AbstractSslContextFactory.java
@@ -178,15 +178,16 @@
             key file in PEM format (see {@link SslContextBuilder#forServer(File, File, String)}). However, we are
             not supporting that now to keep the config/yaml API simple.
          */
-        KeyManagerFactory kmf = buildKeyManagerFactory();
         SslContextBuilder builder;
         if (socketType == SocketType.SERVER)
         {
+            KeyManagerFactory kmf = buildKeyManagerFactory();
             builder = SslContextBuilder.forServer(kmf).clientAuth(this.require_client_auth ? ClientAuth.REQUIRE :
                                                                   ClientAuth.NONE);
         }
         else
         {
+            KeyManagerFactory kmf = buildOutboundKeyManagerFactory();
             builder = SslContextBuilder.forClient().keyManager(kmf);
         }
 
@@ -263,4 +264,12 @@
     abstract protected KeyManagerFactory buildKeyManagerFactory() throws SSLException;
 
     abstract protected TrustManagerFactory buildTrustManagerFactory() throws SSLException;
+
+    /**
+     * Create a {@code KeyManagerFactory} for outbound connections.
+     * It provides a seperate keystore for internode mTLS outbound connections.
+     * @return {@code KeyManagerFactory}
+     * @throws SSLException
+     */
+    abstract protected KeyManagerFactory buildOutboundKeyManagerFactory() throws SSLException;
 }
diff --git a/src/java/org/apache/cassandra/security/DisableSslContextFactory.java b/src/java/org/apache/cassandra/security/DisableSslContextFactory.java
index 9dab062..8058d0a 100644
--- a/src/java/org/apache/cassandra/security/DisableSslContextFactory.java
+++ b/src/java/org/apache/cassandra/security/DisableSslContextFactory.java
@@ -37,12 +37,24 @@
     }
 
     @Override
+    protected KeyManagerFactory buildOutboundKeyManagerFactory() throws SSLException
+    {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
     public boolean hasKeystore()
     {
         return false;
     }
 
     @Override
+    public boolean hasOutboundKeystore()
+    {
+        return false;
+    }
+
+    @Override
     public void initHotReloading() throws SSLException
     {
     }
diff --git a/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java b/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
index 3d47509..5b3ca12 100644
--- a/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
+++ b/src/java/org/apache/cassandra/security/FileBasedSslContextFactory.java
@@ -33,7 +33,7 @@
 import javax.net.ssl.SSLException;
 import javax.net.ssl.TrustManagerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,38 +47,32 @@
  * {@code CAUTION:} While this is a useful abstraction, please be careful if you need to modify this class
  * given possible custom implementations out there!
  */
-abstract public class FileBasedSslContextFactory extends AbstractSslContextFactory
+public abstract class FileBasedSslContextFactory extends AbstractSslContextFactory
 {
     private static final Logger logger = LoggerFactory.getLogger(FileBasedSslContextFactory.class);
-
-    @VisibleForTesting
-    protected volatile boolean checkedExpiry = false;
+    protected FileBasedStoreContext keystoreContext;
+    protected FileBasedStoreContext outboundKeystoreContext;
+    protected FileBasedStoreContext trustStoreContext;
 
     /**
      * List of files that trigger hot reloading of SSL certificates
      */
     protected volatile List<HotReloadableFile> hotReloadableFiles = new ArrayList<>();
 
-    protected String keystore;
-    protected String keystore_password;
-    protected String truststore;
-    protected String truststore_password;
-
     public FileBasedSslContextFactory()
     {
-        keystore = "conf/.keystore";
-        keystore_password = "cassandra";
-        truststore = "conf/.truststore";
-        truststore_password = "cassandra";
+        keystoreContext = new FileBasedStoreContext("conf/.keystore", "cassandra");
+        outboundKeystoreContext = new FileBasedStoreContext("conf/.keystore", "cassandra");
+        trustStoreContext = new FileBasedStoreContext("conf/.truststore", "cassandra");
     }
 
     public FileBasedSslContextFactory(Map<String, Object> parameters)
     {
         super(parameters);
-        keystore = getString("keystore");
-        keystore_password = getString("keystore_password");
-        truststore = getString("truststore");
-        truststore_password = getString("truststore_password");
+        keystoreContext = new FileBasedStoreContext(getString("keystore"), getString("keystore_password"));
+        outboundKeystoreContext = new FileBasedStoreContext(StringUtils.defaultString(getString("outbound_keystore"), keystoreContext.filePath),
+                                                            StringUtils.defaultString(getString("outbound_keystore_password"), keystoreContext.password));
+        trustStoreContext = new FileBasedStoreContext(getString("truststore"), getString("truststore_password"));
     }
 
     @Override
@@ -90,30 +84,41 @@
     @Override
     public boolean hasKeystore()
     {
-        return keystore != null && new File(keystore).exists();
+        return keystoreContext.hasKeystore();
+    }
+
+    @Override
+    public boolean hasOutboundKeystore()
+    {
+        return outboundKeystoreContext.hasKeystore();
     }
 
     private boolean hasTruststore()
     {
-        return truststore != null && new File(truststore).exists();
+        return trustStoreContext.filePath != null && new File(trustStoreContext.filePath).exists();
     }
 
     @Override
     public synchronized void initHotReloading()
     {
         boolean hasKeystore = hasKeystore();
+        boolean hasOutboundKeystore = hasOutboundKeystore();
         boolean hasTruststore = hasTruststore();
 
-        if (hasKeystore || hasTruststore)
+        if (hasKeystore || hasOutboundKeystore || hasTruststore)
         {
             List<HotReloadableFile> fileList = new ArrayList<>();
             if (hasKeystore)
             {
-                fileList.add(new HotReloadableFile(keystore));
+                fileList.add(new HotReloadableFile(keystoreContext.filePath));
+            }
+            if (hasOutboundKeystore)
+            {
+                fileList.add(new HotReloadableFile(outboundKeystoreContext.filePath));
             }
             if (hasTruststore)
             {
-                fileList.add(new HotReloadableFile(truststore));
+                fileList.add(new HotReloadableFile(trustStoreContext.filePath));
             }
             hotReloadableFiles = fileList;
         }
@@ -129,25 +134,13 @@
     @Override
     protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
     {
+        return getKeyManagerFactory(keystoreContext);
+    }
 
-        try (InputStream ksf = Files.newInputStream(Paths.get(keystore)))
-        {
-            final String algorithm = this.algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : this.algorithm;
-            KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm);
-            KeyStore ks = KeyStore.getInstance(store_type);
-            ks.load(ksf, keystore_password.toCharArray());
-            if (!checkedExpiry)
-            {
-                checkExpiredCerts(ks);
-                checkedExpiry = true;
-            }
-            kmf.init(ks, keystore_password.toCharArray());
-            return kmf;
-        }
-        catch (Exception e)
-        {
-            throw new SSLException("failed to build key manager store for secure connections", e);
-        }
+    @Override
+    protected KeyManagerFactory buildOutboundKeyManagerFactory() throws SSLException
+    {
+        return getKeyManagerFactory(outboundKeystoreContext);
     }
 
     /**
@@ -159,12 +152,12 @@
     @Override
     protected TrustManagerFactory buildTrustManagerFactory() throws SSLException
     {
-        try (InputStream tsf = Files.newInputStream(Paths.get(truststore)))
+        try (InputStream tsf = Files.newInputStream(Paths.get(trustStoreContext.filePath)))
         {
             final String algorithm = this.algorithm == null ? TrustManagerFactory.getDefaultAlgorithm() : this.algorithm;
             TrustManagerFactory tmf = TrustManagerFactory.getInstance(algorithm);
             KeyStore ts = KeyStore.getInstance(store_type);
-            ts.load(tsf, truststore_password.toCharArray());
+            ts.load(tsf, trustStoreContext.password.toCharArray());
             tmf.init(ts);
             return tmf;
         }
@@ -174,6 +167,29 @@
         }
     }
 
+    private KeyManagerFactory getKeyManagerFactory(final FileBasedStoreContext context) throws SSLException
+    {
+        try (InputStream ksf = Files.newInputStream(Paths.get(context.filePath)))
+        {
+            final String algorithm = this.algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : this.algorithm;
+            KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm);
+            KeyStore ks = KeyStore.getInstance(store_type);
+            ks.load(ksf, context.password.toCharArray());
+
+            if (!context.checkedExpiry)
+            {
+                checkExpiredCerts(ks);
+                context.checkedExpiry = true;
+            }
+            kmf.init(ks, context.password.toCharArray());
+            return kmf;
+        }
+        catch (Exception e)
+        {
+            throw new SSLException("failed to build key manager store for secure connections", e);
+        }
+    }
+
     protected boolean checkExpiredCerts(KeyStore ks) throws KeyStoreException
     {
         boolean hasExpiredCerts = false;
@@ -225,4 +241,27 @@
                    '}';
         }
     }
+
+    protected static class FileBasedStoreContext
+    {
+        public volatile boolean checkedExpiry = false;
+        public String filePath;
+        public String password;
+
+        public FileBasedStoreContext(String keystore, String keystorePassword)
+        {
+            this.filePath = keystore;
+            this.password = keystorePassword;
+        }
+
+        protected boolean hasKeystore()
+        {
+            return filePath != null && new File(filePath).exists();
+        }
+
+        protected boolean passwordMatchesIfPresent(String keyPassword)
+        {
+            return StringUtils.isEmpty(password) || keyPassword.equals(password);
+        }
+    }
 }
diff --git a/src/java/org/apache/cassandra/security/ISslContextFactory.java b/src/java/org/apache/cassandra/security/ISslContextFactory.java
index 579c95e..11c4717 100644
--- a/src/java/org/apache/cassandra/security/ISslContextFactory.java
+++ b/src/java/org/apache/cassandra/security/ISslContextFactory.java
@@ -100,6 +100,16 @@
     }
 
     /**
+     * Returns if this factory uses outbound keystore.
+     *
+     * @return {@code true} by default unless the implementation overrides this
+     */
+    default boolean hasOutboundKeystore()
+    {
+        return false;
+    }
+
+    /**
      * Returns the prepared list of accepted protocols.
      *
      * @return array of protocol names suitable for passing to Netty's SslContextBuilder.protocols, or null if the
diff --git a/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java b/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
index 8ecbec5..3d3ecc2 100644
--- a/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
+++ b/src/java/org/apache/cassandra/security/PEMBasedSslContextFactory.java
@@ -90,47 +90,55 @@
 {
     public static final String DEFAULT_TARGET_STORETYPE = "PKCS12";
     private static final Logger logger = LoggerFactory.getLogger(PEMBasedSslContextFactory.class);
-    private String pemEncodedKey;
-    private String keyPassword;
-    private String pemEncodedCertificates;
-    private boolean maybeFileBasedPrivateKey;
-    private boolean maybeFileBasedTrustedCertificates;
+    private PEMBasedKeyStoreContext pemEncodedTrustCertificates;
+    private PEMBasedKeyStoreContext pemEncodedKeyContext;
+    private PEMBasedKeyStoreContext pemEncodedOutboundKeyContext;
 
     public PEMBasedSslContextFactory()
     {
     }
 
-    public PEMBasedSslContextFactory(Map<String, Object> parameters)
+    private void validatePasswords()
     {
-        super(parameters);
-        pemEncodedKey = getString(ConfigKey.ENCODED_KEY.getKeyName());
-        keyPassword = getString(ConfigKey.KEY_PASSWORD.getKeyName());
-        if (StringUtils.isEmpty(keyPassword))
+        boolean outboundPasswordMismatch = !outboundKeystoreContext.passwordMatchesIfPresent(pemEncodedOutboundKeyContext.password);
+        boolean shouldThrow = !keystoreContext.passwordMatchesIfPresent(pemEncodedKeyContext.password)
+                              || outboundPasswordMismatch;
+        String keyName = outboundPasswordMismatch ? "outbound_" : "";
+
+        if (shouldThrow)
         {
-            keyPassword = keystore_password;
-        }
-        else if (!StringUtils.isEmpty(keystore_password) && !keyPassword.equals(keystore_password))
-        {
-            throw new IllegalArgumentException("'keystore_password' and 'key_password' both configurations are given and the " +
-                                               "values do not match");
+            final String msg = String.format("'%skeystore_password' and '%skey_password' both configurations are given and the values do not match", keyName, keyName);
+            throw new IllegalArgumentException(msg);
         }
         else
         {
-            logger.warn("'keystore_password' and 'key_password' both are configured but since the values match it's " +
-                        "okay. Ideally you should only specify one of them.");
+            logger.warn("'{}keystore_password' and '{}key_password' both are configured but since the values match it's " +
+                        "okay. Ideally you should only specify one of them.", keyName, keyName);
         }
+    }
 
-        if (!StringUtils.isEmpty(truststore_password))
+    public PEMBasedSslContextFactory(Map<String, Object> parameters)
+    {
+        super(parameters);
+        final String pemEncodedKey = getString(ConfigKey.ENCODED_KEY.getKeyName());
+        final String pemEncodedKeyPassword = StringUtils.defaultString(getString(ConfigKey.KEY_PASSWORD.getKeyName()), keystoreContext.password);
+        pemEncodedKeyContext = new PEMBasedKeyStoreContext(pemEncodedKey, pemEncodedKeyPassword, StringUtils.isEmpty(pemEncodedKey), keystoreContext);
+
+        final String pemEncodedOutboundKey = StringUtils.defaultString(getString(ConfigKey.OUTBOUND_ENCODED_KEY.getKeyName()), pemEncodedKey);
+        final String outboundKeyPassword = StringUtils.defaultString(StringUtils.defaultString(getString(ConfigKey.OUTBOUND_ENCODED_KEY_PASSWORD.getKeyName()),
+                                                                                               outboundKeystoreContext.password), pemEncodedKeyPassword);
+        pemEncodedOutboundKeyContext = new PEMBasedKeyStoreContext(pemEncodedOutboundKey, outboundKeyPassword, StringUtils.isEmpty(pemEncodedOutboundKey), outboundKeystoreContext);
+
+        validatePasswords();
+
+        if (!StringUtils.isEmpty(trustStoreContext.password))
         {
             logger.warn("PEM based truststore should not be using password. Ignoring the given value in " +
                         "'truststore_password' configuration.");
         }
 
-        pemEncodedCertificates = getString(ConfigKey.ENCODED_CERTIFICATES.getKeyName());
-
-        maybeFileBasedPrivateKey = StringUtils.isEmpty(pemEncodedKey);
-        maybeFileBasedTrustedCertificates = StringUtils.isEmpty(pemEncodedCertificates);
-
+        final String pemEncodedCerts = getString(ConfigKey.ENCODED_CERTIFICATES.getKeyName());
+        pemEncodedTrustCertificates = new PEMBasedKeyStoreContext(pemEncodedCerts, null, StringUtils.isEmpty(pemEncodedCerts), trustStoreContext);
         enforceSinglePrivateKeySource();
         enforceSingleTurstedCertificatesSource();
     }
@@ -143,18 +151,22 @@
     @Override
     public boolean hasKeystore()
     {
-        return maybeFileBasedPrivateKey ? keystoreFileExists() :
-               !StringUtils.isEmpty(pemEncodedKey);
+        return pemEncodedKeyContext.maybeFilebasedKey
+               ? keystoreContext.hasKeystore()
+               : !StringUtils.isEmpty(pemEncodedKeyContext.key);
     }
 
     /**
-     * Checks if the keystore file exists.
+     * Decides if this factory has an outbound keystore defined - key material specified in files or inline to the configuration.
      *
-     * @return {@code true} if keystore file exists; {@code false} otherwise
+     * @return {@code true} if there is an outbound keystore defined; {@code false} otherwise
      */
-    private boolean keystoreFileExists()
+    @Override
+    public boolean hasOutboundKeystore()
     {
-        return keystore != null && new File(keystore).exists();
+        return pemEncodedOutboundKeyContext.maybeFilebasedKey
+               ? outboundKeystoreContext.hasKeystore()
+               : !StringUtils.isEmpty(pemEncodedOutboundKeyContext.key);
     }
 
     /**
@@ -165,8 +177,8 @@
      */
     private boolean hasTruststore()
     {
-        return maybeFileBasedTrustedCertificates ? truststoreFileExists() :
-               !StringUtils.isEmpty(pemEncodedCertificates);
+        return pemEncodedTrustCertificates.maybeFilebasedKey ? truststoreFileExists() :
+               !StringUtils.isEmpty(pemEncodedTrustCertificates.key);
     }
 
     /**
@@ -176,7 +188,7 @@
      */
     private boolean truststoreFileExists()
     {
-        return truststore != null && new File(truststore).exists();
+        return trustStoreContext.filePath != null && new File(trustStoreContext.filePath).exists();
     }
 
     /**
@@ -186,13 +198,17 @@
     public synchronized void initHotReloading()
     {
         List<HotReloadableFile> fileList = new ArrayList<>();
-        if (maybeFileBasedPrivateKey && hasKeystore())
+        if (pemEncodedKeyContext.maybeFilebasedKey && hasKeystore())
         {
-            fileList.add(new HotReloadableFile(keystore));
+            fileList.add(new HotReloadableFile(keystoreContext.filePath));
         }
-        if (maybeFileBasedTrustedCertificates && hasTruststore())
+        if (pemEncodedOutboundKeyContext.maybeFilebasedKey && hasOutboundKeystore())
         {
-            fileList.add(new HotReloadableFile(truststore));
+            fileList.add(new HotReloadableFile(outboundKeystoreContext.filePath));
+        }
+        if (pemEncodedTrustCertificates.maybeFilebasedKey && hasTruststore())
+        {
+            fileList.add(new HotReloadableFile(trustStoreContext.filePath));
         }
         if (!fileList.isEmpty())
         {
@@ -210,29 +226,40 @@
     @Override
     protected KeyManagerFactory buildKeyManagerFactory() throws SSLException
     {
+        return buildKeyManagerFactory(pemEncodedKeyContext, keystoreContext);
+    }
+
+    @Override
+    protected KeyManagerFactory buildOutboundKeyManagerFactory() throws SSLException
+    {
+        return buildKeyManagerFactory(pemEncodedOutboundKeyContext, outboundKeystoreContext);
+    }
+
+    private KeyManagerFactory buildKeyManagerFactory(PEMBasedKeyStoreContext pemBasedKeyStoreContext, FileBasedStoreContext keyStoreContext) throws SSLException
+    {
         try
         {
-            if (hasKeystore())
+            if (pemBasedKeyStoreContext.hasKey())
             {
-                if (maybeFileBasedPrivateKey)
+                if (pemBasedKeyStoreContext.maybeFilebasedKey)
                 {
-                    pemEncodedKey = readPEMFile(keystore); // read PEM from the file
+                    pemBasedKeyStoreContext.key = readPEMFile(keyStoreContext.filePath); // read PEM from the file
                 }
 
                 KeyManagerFactory kmf = KeyManagerFactory.getInstance(
                 algorithm == null ? KeyManagerFactory.getDefaultAlgorithm() : algorithm);
-                KeyStore ks = buildKeyStore();
-                if (!checkedExpiry)
+                KeyStore ks = buildKeyStore(pemBasedKeyStoreContext.key, pemBasedKeyStoreContext.password);
+                if (!keyStoreContext.checkedExpiry)
                 {
                     checkExpiredCerts(ks);
-                    checkedExpiry = true;
+                    keyStoreContext.checkedExpiry = true;
                 }
-                kmf.init(ks, keyPassword != null ? keyPassword.toCharArray() : null);
+                kmf.init(ks, pemBasedKeyStoreContext.password != null ? pemBasedKeyStoreContext.password.toCharArray() : null);
                 return kmf;
             }
             else
             {
-                throw new SSLException("Must provide keystore or private_key in configuration for PEMBasedSSlContextFactory");
+                throw new SSLException("Must provide keystore/outbound_keystore or private_key/outbound_private_key in configuration for PEMBasedSSlContextFactory");
             }
         }
         catch (Exception e)
@@ -254,9 +281,9 @@
         {
             if (hasTruststore())
             {
-                if (maybeFileBasedTrustedCertificates)
+                if (pemEncodedTrustCertificates.maybeFilebasedKey)
                 {
-                    pemEncodedCertificates = readPEMFile(truststore); // read PEM from the file
+                    pemEncodedTrustCertificates.key = readPEMFile(trustStoreContext.filePath); // read PEM from the file
                 }
 
                 TrustManagerFactory tmf = TrustManagerFactory.getInstance(
@@ -286,7 +313,7 @@
      * Builds KeyStore object given the {@link #DEFAULT_TARGET_STORETYPE} out of the PEM formatted private key material.
      * It uses {@code cassandra-ssl-keystore} as the alias for the created key-entry.
      */
-    private KeyStore buildKeyStore() throws GeneralSecurityException, IOException
+    private static KeyStore buildKeyStore(final String pemEncodedKey, final String keyPassword) throws GeneralSecurityException, IOException
     {
         char[] keyPasswordArray = keyPassword != null ? keyPassword.toCharArray() : null;
         PrivateKey privateKey = PEMReader.extractPrivateKey(pemEncodedKey, keyPassword);
@@ -310,7 +337,7 @@
      */
     private KeyStore buildTrustStore() throws GeneralSecurityException, IOException
     {
-        Certificate[] certChainArray = PEMReader.extractCertificates(pemEncodedCertificates);
+        Certificate[] certChainArray = PEMReader.extractCertificates(pemEncodedTrustCertificates.key);
         if (certChainArray == null || certChainArray.length == 0)
         {
             throw new SSLException("Could not read any certificates from the given PEM");
@@ -331,11 +358,16 @@
      */
     private void enforceSinglePrivateKeySource()
     {
-        if (keystoreFileExists() && !StringUtils.isEmpty(pemEncodedKey))
+        if (keystoreContext.hasKeystore() && !StringUtils.isEmpty(pemEncodedKeyContext.key))
         {
             throw new IllegalArgumentException("Configuration must specify value for either keystore or private_key, " +
                                                "not both for PEMBasedSSlContextFactory");
         }
+        if (outboundKeystoreContext.hasKeystore() && !StringUtils.isEmpty(pemEncodedOutboundKeyContext.key))
+        {
+            throw new IllegalArgumentException("Configuration must specify value for either outbound_keystore or outbound_private_key, " +
+                                               "not both for PEMBasedSSlContextFactory");
+        }
     }
 
     /**
@@ -344,17 +376,43 @@
      */
     private void enforceSingleTurstedCertificatesSource()
     {
-        if (truststoreFileExists() && !StringUtils.isEmpty(pemEncodedCertificates))
+        if (truststoreFileExists() && !StringUtils.isEmpty(pemEncodedTrustCertificates.key))
         {
             throw new IllegalArgumentException("Configuration must specify value for either truststore or " +
                                                "trusted_certificates, not both for PEMBasedSSlContextFactory");
         }
     }
 
+    public static class PEMBasedKeyStoreContext
+    {
+        public String key;
+        public final String password;
+        public final boolean maybeFilebasedKey;
+        public final FileBasedStoreContext filebasedKeystoreContext;
+
+        public PEMBasedKeyStoreContext(final String encodedKey, final String getEncodedKeyPassword,
+                                       final boolean maybeFilebasedKey, final FileBasedStoreContext filebasedKeystoreContext)
+        {
+            this.key = encodedKey;
+            this.password = getEncodedKeyPassword;
+            this.maybeFilebasedKey = maybeFilebasedKey;
+            this.filebasedKeystoreContext = filebasedKeystoreContext;
+        }
+
+        public boolean hasKey()
+        {
+            return maybeFilebasedKey
+                   ? filebasedKeystoreContext.hasKeystore()
+                   : !StringUtils.isEmpty(key);
+        }
+    }
+
     public enum ConfigKey
     {
         ENCODED_KEY("private_key"),
         KEY_PASSWORD("private_key_password"),
+        OUTBOUND_ENCODED_KEY("outbound_private_key"),
+        OUTBOUND_ENCODED_KEY_PASSWORD("outbound_private_key_password"),
         ENCODED_CERTIFICATES("trusted_certificates");
 
         final String keyName;
diff --git a/src/java/org/apache/cassandra/serializers/BooleanSerializer.java b/src/java/org/apache/cassandra/serializers/BooleanSerializer.java
index d372a2a..403e6b7 100644
--- a/src/java/org/apache/cassandra/serializers/BooleanSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/BooleanSerializer.java
@@ -24,8 +24,8 @@
 
 public class BooleanSerializer extends TypeSerializer<Boolean>
 {
-    private static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[] {1});
-    private static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[] {0});
+    public static final ByteBuffer TRUE = ByteBuffer.wrap(new byte[] {1});
+    public static final ByteBuffer FALSE = ByteBuffer.wrap(new byte[] {0});
 
     public static final BooleanSerializer instance = new BooleanSerializer();
 
diff --git a/src/java/org/apache/cassandra/serializers/CollectionSerializer.java b/src/java/org/apache/cassandra/serializers/CollectionSerializer.java
index eb2991b..204261d 100644
--- a/src/java/org/apache/cassandra/serializers/CollectionSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/CollectionSerializer.java
@@ -91,11 +91,6 @@
         output.putInt(elements);
     }
 
-    public static int readCollectionSize(ByteBuffer input, ProtocolVersion version)
-    {
-        return readCollectionSize(input, ByteBufferAccessor.instance, version);
-    }
-
     public static <V> int readCollectionSize(V value, ValueAccessor<V> accessor, ProtocolVersion version)
     {
         return accessor.toInt(value);
diff --git a/src/java/org/apache/cassandra/serializers/MapSerializer.java b/src/java/org/apache/cassandra/serializers/MapSerializer.java
index 13468fc..400a8e7 100644
--- a/src/java/org/apache/cassandra/serializers/MapSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/MapSerializer.java
@@ -148,7 +148,7 @@
         try
         {
             ByteBuffer input = collection.duplicate();
-            int n = readCollectionSize(input, ProtocolVersion.V3);
+            int n = readCollectionSize(input, ByteBufferAccessor.instance, ProtocolVersion.V3);
             int offset = sizeOfCollectionSize(n, ProtocolVersion.V3);
             for (int i = 0; i < n; i++)
             {
@@ -184,7 +184,7 @@
         try
         {
             ByteBuffer input = collection.duplicate();
-            int n = readCollectionSize(input, ProtocolVersion.V3);
+            int n = readCollectionSize(input, ByteBufferAccessor.instance, ProtocolVersion.V3);
             input.position(input.position() + sizeOfCollectionSize(n, ProtocolVersion.V3));
             int startPos = input.position();
             int count = 0;
diff --git a/src/java/org/apache/cassandra/serializers/SetSerializer.java b/src/java/org/apache/cassandra/serializers/SetSerializer.java
index 6be919f..54b8496 100644
--- a/src/java/org/apache/cassandra/serializers/SetSerializer.java
+++ b/src/java/org/apache/cassandra/serializers/SetSerializer.java
@@ -156,7 +156,7 @@
     {
         try
         {
-            int n = readCollectionSize(input, ProtocolVersion.V3);
+            int n = readCollectionSize(input, ByteBufferAccessor.instance, ProtocolVersion.V3);
             int offset = sizeOfCollectionSize(n, ProtocolVersion.V3);
 
             for (int i = 0; i < n; i++)
@@ -192,7 +192,7 @@
         try
         {
             ByteBuffer input = collection.duplicate();
-            int n = readCollectionSize(input, ProtocolVersion.V3);
+            int n = readCollectionSize(input, ByteBufferAccessor.instance, ProtocolVersion.V3);
             input.position(input.position() + sizeOfCollectionSize(n, ProtocolVersion.V3));
             int startPos = input.position();
             int count = 0;
diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java
index cca4093..90b9496 100644
--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java
+++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java
@@ -291,24 +291,13 @@
 
         SSTableHeaderFix.fixNonFrozenUDTIfUpgradeFrom30();
 
-        // clean up debris in the rest of the keyspaces
-        for (String keyspaceName : Schema.instance.getKeyspaces())
+        try
         {
-            // Skip system as we've already cleaned it
-            if (keyspaceName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
-                continue;
-
-            for (TableMetadata cfm : Schema.instance.getTablesAndViews(keyspaceName))
-            {
-                try
-                {
-                    ColumnFamilyStore.scrubDataDirectories(cfm);
-                }
-                catch (StartupException e)
-                {
-                    exitOrFail(e.returnCode, e.getMessage(), e.getCause());
-                }
-            }
+            scrubDataDirectories();
+        }
+        catch (StartupException e)
+        {
+            exitOrFail(e.returnCode, e.getMessage(), e.getCause());
         }
 
         Keyspace.setInitialized();
@@ -579,6 +568,22 @@
         VirtualKeyspaceRegistry.instance.register(SystemViewsKeyspace.instance);
     }
 
+    public void scrubDataDirectories() throws StartupException
+    {
+        // clean up debris in the rest of the keyspaces
+        for (String keyspaceName : Schema.instance.getKeyspaces())
+        {
+            // Skip system as we've already cleaned it
+            if (keyspaceName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
+                continue;
+
+            for (TableMetadata cfm : Schema.instance.getTablesAndViews(keyspaceName))
+            {
+                ColumnFamilyStore.scrubDataDirectories(cfm);
+            }
+        }
+    }
+
     public synchronized void initializeClientTransports()
     {
         // Native transport
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index 31d5477..e89bdae 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -39,25 +39,25 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheLoader;
 import com.google.common.collect.Iterables;
 import com.google.common.util.concurrent.Uninterruptibles;
 
-import org.apache.cassandra.config.Config;
-import org.apache.cassandra.service.paxos.*;
+import org.apache.cassandra.service.paxos.Ballot;
+import org.apache.cassandra.service.paxos.Commit;
+import org.apache.cassandra.service.paxos.ContentionStrategy;
 import org.apache.cassandra.service.paxos.Paxos;
-import org.apache.cassandra.utils.TimeUUID;
-import org.apache.cassandra.utils.concurrent.CountDownLatch;
-
+import org.apache.cassandra.service.paxos.PaxosState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.batchlog.Batch;
 import org.apache.cassandra.batchlog.BatchlogManager;
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.config.CassandraRelevantProperties;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
@@ -140,13 +140,17 @@
 import org.apache.cassandra.utils.MonotonicClock;
 import org.apache.cassandra.utils.NoSpamLogger;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.concurrent.CountDownLatch;
 import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException;
 
-import static com.google.common.collect.Iterables.concat;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+import static com.google.common.collect.Iterables.concat;
+import static org.apache.commons.lang3.StringUtils.join;
+
 import static org.apache.cassandra.db.ConsistencyLevel.SERIAL;
-import static org.apache.cassandra.net.Message.out;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casReadMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.casWriteMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.readMetrics;
@@ -154,8 +158,15 @@
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.viewWriteMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.writeMetrics;
 import static org.apache.cassandra.metrics.ClientRequestsMetricsHolder.writeMetricsForLevel;
+import static org.apache.cassandra.net.Message.out;
 import static org.apache.cassandra.net.NoPayload.noPayload;
-import static org.apache.cassandra.net.Verb.*;
+import static org.apache.cassandra.net.Verb.BATCH_STORE_REQ;
+import static org.apache.cassandra.net.Verb.MUTATION_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_COMMIT_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PREPARE_REQ;
+import static org.apache.cassandra.net.Verb.PAXOS_PROPOSE_REQ;
+import static org.apache.cassandra.net.Verb.SCHEMA_VERSION_REQ;
+import static org.apache.cassandra.net.Verb.TRUNCATE_REQ;
 import static org.apache.cassandra.service.BatchlogResponseHandler.BatchlogCleanup;
 import static org.apache.cassandra.service.paxos.Ballot.Flag.GLOBAL;
 import static org.apache.cassandra.service.paxos.Ballot.Flag.LOCAL;
@@ -166,7 +177,6 @@
 import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 import static org.apache.cassandra.utils.TimeUUID.Generator.nextTimeUUID;
 import static org.apache.cassandra.utils.concurrent.CountDownLatch.newCountDownLatch;
-import static org.apache.commons.lang3.StringUtils.join;
 
 public class StorageProxy implements StorageProxyMBean
 {
@@ -830,6 +840,12 @@
             }
 
             @Override
+            public String description()
+            {
+                return "Paxos " + message.payload.toString();
+            }
+
+            @Override
             protected Verb verb()
             {
                 return PAXOS_COMMIT_REQ;
@@ -1264,7 +1280,7 @@
             logger.trace("Sending batchlog store request {} to {} for {} mutations", batch.id, replica, batch.size());
 
             if (replica.isSelf())
-                performLocally(Stage.MUTATION, replica, () -> BatchlogManager.store(batch), handler);
+                performLocally(Stage.MUTATION, replica, () -> BatchlogManager.store(batch), handler, "Batchlog store");
             else
                 MessagingService.instance().sendWithCallback(message, replica.endpoint(), handler);
         }
@@ -1280,7 +1296,7 @@
                 logger.trace("Sending batchlog remove request {} to {}", uuid, target);
 
             if (target.isSelf())
-                performLocally(Stage.MUTATION, target, () -> BatchlogManager.remove(uuid));
+                performLocally(Stage.MUTATION, target, () -> BatchlogManager.remove(uuid), "Batchlog remove");
             else
                 MessagingService.instance().send(message, target.endpoint());
         }
@@ -1524,7 +1540,7 @@
         if (insertLocal)
         {
             Preconditions.checkNotNull(localReplica);
-            performLocally(stage, localReplica, mutation::apply, responseHandler);
+            performLocally(stage, localReplica, mutation::apply, responseHandler, mutation);
         }
 
         if (localDc != null)
@@ -1591,7 +1607,7 @@
         logger.trace("Sending message to {}@{}", message.id(), target);
     }
 
-    private static void performLocally(Stage stage, Replica localReplica, final Runnable runnable)
+    private static void performLocally(Stage stage, Replica localReplica, final Runnable runnable, String description)
     {
         stage.maybeExecuteImmediately(new LocalMutationRunnable(localReplica)
         {
@@ -1608,6 +1624,12 @@
             }
 
             @Override
+            public String description()
+            {
+                return description;
+            }
+
+            @Override
             protected Verb verb()
             {
                 return Verb.MUTATION_REQ;
@@ -1615,7 +1637,7 @@
         });
     }
 
-    private static void performLocally(Stage stage, Replica localReplica, final Runnable runnable, final RequestCallback<?> handler)
+    private static void performLocally(Stage stage, Replica localReplica, final Runnable runnable, final RequestCallback<?> handler, Object description)
     {
         stage.maybeExecuteImmediately(new LocalMutationRunnable(localReplica)
         {
@@ -1635,6 +1657,14 @@
             }
 
             @Override
+            public String description()
+            {
+                // description is an Object and toString() called so we do not have to evaluate the Mutation.toString()
+                // unless explicitly checked
+                return description.toString();
+            }
+
+            @Override
             protected Verb verb()
             {
                 return Verb.MUTATION_REQ;
@@ -1793,7 +1823,7 @@
     public static PartitionIterator read(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws UnavailableException, IsBootstrappingException, ReadFailureException, ReadTimeoutException, InvalidRequestException
     {
-        if (StorageService.instance.isBootstrapMode() && !systemKeyspaceQuery(group.queries))
+        if (!isSafeToPerformRead(group.queries))
         {
             readMetrics.unavailables.mark();
             readMetricsForLevel(consistencyLevel).unavailables.mark();
@@ -1820,6 +1850,16 @@
              : readRegular(group, consistencyLevel, queryStartNanoTime);
     }
 
+    public static boolean isSafeToPerformRead(List<SinglePartitionReadCommand> queries)
+    {
+        return isSafeToPerformRead() || systemKeyspaceQuery(queries);
+    }
+
+    public static boolean isSafeToPerformRead()
+    {
+        return !StorageService.instance.isBootstrapMode();
+    }
+
     private static PartitionIterator readWithPaxos(SinglePartitionReadCommand.Group group, ConsistencyLevel consistencyLevel, long queryStartNanoTime)
     throws InvalidRequestException, UnavailableException, ReadFailureException, ReadTimeoutException
     {
@@ -2088,7 +2128,7 @@
         return concatAndBlockOnRepair(results, repairs);
     }
 
-    public static class LocalReadRunnable extends DroppableRunnable
+    public static class LocalReadRunnable extends DroppableRunnable implements RunnableDebuggableTask
     {
         private final ReadCommand command;
         private final ReadCallback handler;
@@ -2158,6 +2198,24 @@
                 }
             }
         }
+
+        @Override
+        public long creationTimeNanos()
+        {
+            return approxCreationTimeNanos;
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            return approxStartTimeNanos;
+        }
+
+        @Override
+        public String description()
+        {
+            return command.toCQLString();
+        }
     }
 
     public static PartitionIterator getRangeSlice(PartitionRangeReadCommand command,
@@ -2468,7 +2526,9 @@
      */
     private static abstract class DroppableRunnable implements Runnable
     {
-        final long approxCreationTimeNanos;
+        protected final long approxCreationTimeNanos;
+        protected volatile long approxStartTimeNanos;
+        
         final Verb verb;
 
         public DroppableRunnable(Verb verb)
@@ -2479,11 +2539,11 @@
 
         public final void run()
         {
-            long approxCurrentTimeNanos = MonotonicClock.Global.approxTime.now();
+            approxStartTimeNanos = MonotonicClock.Global.approxTime.now();
             long expirationTimeNanos = verb.expiresAtNanos(approxCreationTimeNanos);
-            if (approxCurrentTimeNanos > expirationTimeNanos)
+            if (approxStartTimeNanos > expirationTimeNanos)
             {
-                long timeTakenNanos = approxCurrentTimeNanos - approxCreationTimeNanos;
+                long timeTakenNanos = approxStartTimeNanos - approxCreationTimeNanos;
                 MessagingService.instance().metrics.recordSelfDroppedMessage(verb, timeTakenNanos, NANOSECONDS);
                 return;
             }
@@ -2504,9 +2564,10 @@
      * Like DroppableRunnable, but if it aborts, it will rerun (on the mutation stage) after
      * marking itself as a hint in progress so that the hint backpressure mechanism can function.
      */
-    private static abstract class LocalMutationRunnable implements Runnable
+    private static abstract class LocalMutationRunnable implements RunnableDebuggableTask
     {
         private final long approxCreationTimeNanos = MonotonicClock.Global.approxTime.now();
+        private volatile long approxStartTimeNanos;
 
         private final Replica localReplica;
 
@@ -2518,11 +2579,12 @@
         public final void run()
         {
             final Verb verb = verb();
-            long nowNanos = MonotonicClock.Global.approxTime.now();
+            approxStartTimeNanos = MonotonicClock.Global.approxTime.now();
             long expirationTimeNanos = verb.expiresAtNanos(approxCreationTimeNanos);
-            if (nowNanos > expirationTimeNanos)
+            
+            if (approxStartTimeNanos > expirationTimeNanos)
             {
-                long timeTakenNanos = nowNanos - approxCreationTimeNanos;
+                long timeTakenNanos = approxStartTimeNanos - approxCreationTimeNanos;
                 MessagingService.instance().metrics.recordSelfDroppedMessage(Verb.MUTATION_REQ, timeTakenNanos, NANOSECONDS);
 
                 HintRunnable runnable = new HintRunnable(EndpointsForToken.of(localReplica.range().right, localReplica))
@@ -2546,14 +2608,34 @@
             }
         }
 
+        @Override
+        public long creationTimeNanos()
+        {
+            return approxCreationTimeNanos;
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            return approxStartTimeNanos;
+        }
+
+        @Override
+        abstract public String description();
+
         abstract protected Verb verb();
         abstract protected void runMayThrow() throws Exception;
     }
 
     public static void logRequestException(Exception exception, Collection<? extends ReadCommand> commands)
     {
+        // Multiple different types of errors can happen, so by deduping on the error type we can see each error
+        // case rather than just exposing the first error seen; this should make sure more rare issues are exposed
+        // rather than being hidden by more common errors such as timeout or unavailable
+        // see CASSANDRA-17754
+        String msg = exception.getClass().getSimpleName() + " \"{}\" while executing {}";
         NoSpamLogger.log(logger, NoSpamLogger.Level.INFO, FAILURE_LOGGING_INTERVAL_SECONDS, TimeUnit.SECONDS,
-                         "\"{}\" while executing {}",
+                         msg,
                          () -> new Object[]
                                {
                                    exception.getMessage(),
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 7458c25..6bbc516 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -17,7 +17,6 @@
  */
 package org.apache.cassandra.service;
 
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOError;
@@ -1238,6 +1237,7 @@
 
             DatabaseDescriptor.getRoleManager().setup();
             DatabaseDescriptor.getAuthenticator().setup();
+            DatabaseDescriptor.getInternodeAuthenticator().setupInternode();
             DatabaseDescriptor.getAuthorizer().setup();
             DatabaseDescriptor.getNetworkAuthorizer().setup();
             AuthCacheService.initializeAndRegisterCaches();
@@ -1776,11 +1776,18 @@
         {
             if (!isReplacingSameAddress())
             {
+                // Historically BROADCAST_INTERVAL was used, but this is unrelated to ring_delay, so using it to know
+                // how long to sleep only works with the default settings (ring_delay=30s, broadcast=60s).  For users
+                // who are aware of this relationship, this coupling should not be broken, but for most users this
+                // relationship isn't known and instead we should rely on the ring_delay.
+                // See CASSANDRA-17776
+                long sleepDelayMillis = Math.max(LoadBroadcaster.BROADCAST_INTERVAL, ringTimeoutMillis * 2);
                 try
                 {
                     // Sleep additionally to make sure that the server actually is not alive
                     // and giving it more time to gossip if alive.
-                    Thread.sleep(LoadBroadcaster.BROADCAST_INTERVAL);
+                    logger.info("Sleeping for {}ms waiting to make sure no new gossip updates happen for {}", sleepDelayMillis, DatabaseDescriptor.getReplaceAddress());
+                    Thread.sleep(sleepDelayMillis);
                 }
                 catch (InterruptedException e)
                 {
@@ -4136,6 +4143,7 @@
     public Map<String, TabularData> getSnapshotDetails(Map<String, String> options)
     {
         boolean skipExpiring = options != null && Boolean.parseBoolean(options.getOrDefault("no_ttl", "false"));
+        boolean includeEphemeral = options != null && Boolean.parseBoolean(options.getOrDefault("include_ephemeral", "false"));
 
         SnapshotLoader loader = new SnapshotLoader();
         Map<String, TabularData> snapshotMap = new HashMap<>();
@@ -4144,6 +4152,8 @@
         {
             if (skipExpiring && snapshot.isExpiring())
                 continue;
+            if (!includeEphemeral && snapshot.isEphemeral())
+                continue;
 
             TabularDataSupport data = (TabularDataSupport) snapshotMap.get(snapshot.getTag());
             if (data == null)
diff --git a/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java b/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
index a88e831..5e9fad1 100644
--- a/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
+++ b/src/java/org/apache/cassandra/service/paxos/PaxosRepairHistory.java
@@ -29,6 +29,7 @@
 
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.TimeUUIDType;
 import org.apache.cassandra.db.marshal.TupleType;
@@ -181,7 +182,7 @@
         Ballot[] ballotLowBounds = new Ballot[tuples.size()];
         for (int i = 0 ; i < tuples.size() ; ++i)
         {
-            ByteBuffer[] split = TYPE.split(tuples.get(i));
+            ByteBuffer[] split = TYPE.split(ByteBufferAccessor.instance, tuples.get(i));
             if (i < tokenInclusiveUpperBounds.length)
                 tokenInclusiveUpperBounds[i] = TOKEN_FACTORY.fromByteArray(split[0]);
             ballotLowBounds[i] = Ballot.deserialize(split[1]);
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
index 979904b..100606e 100644
--- a/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotLoader.java
@@ -74,6 +74,11 @@
         this.dataDirectories = dataDirs;
     }
 
+    public SnapshotLoader(Directories directories)
+    {
+        this(directories.getCFDirectories().stream().map(File::toPath).collect(Collectors.toList()));
+    }
+
     public Set<TableSnapshot> loadSnapshots()
     {
         for (Path dataDir : dataDirectories)
diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
index ba840ef..4ac9bdc 100644
--- a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
+++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java
@@ -46,19 +46,25 @@
     @JsonProperty("expires_at")
     public final Instant expiresAt;
 
+    @JsonProperty("ephemeral")
+    public final boolean ephemeral;
+
     /** needed for jackson serialization */
     @SuppressWarnings("unused")
-    private SnapshotManifest() {
+    private SnapshotManifest()
+    {
         this.files = null;
         this.createdAt = null;
         this.expiresAt = null;
+        this.ephemeral = false;
     }
 
-    public SnapshotManifest(List<String> files, DurationSpec.IntSecondsBound ttl, Instant creationTime)
+    public SnapshotManifest(List<String> files, DurationSpec.IntSecondsBound ttl, Instant creationTime, boolean ephemeral)
     {
         this.files = files;
         this.createdAt = creationTime;
         this.expiresAt = ttl == null ? null : createdAt.plusSeconds(ttl.toSeconds());
+        this.ephemeral = ephemeral;
     }
 
     public List<String> getFiles()
@@ -76,6 +82,11 @@
         return expiresAt;
     }
 
+    public boolean isEphemeral()
+    {
+        return ephemeral;
+    }
+
     public void serializeToJsonFile(File outputFile) throws IOException
     {
         FBUtilities.serializeToJsonFile(this, outputFile);
@@ -92,12 +103,15 @@
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         SnapshotManifest manifest = (SnapshotManifest) o;
-        return Objects.equals(files, manifest.files) && Objects.equals(createdAt, manifest.createdAt) && Objects.equals(expiresAt, manifest.expiresAt);
+        return Objects.equals(files, manifest.files)
+               && Objects.equals(createdAt, manifest.createdAt)
+               && Objects.equals(expiresAt, manifest.expiresAt)
+               && Objects.equals(ephemeral, manifest.ephemeral);
     }
 
     @Override
     public int hashCode()
     {
-        return Objects.hash(files, createdAt, expiresAt);
+        return Objects.hash(files, createdAt, expiresAt, ephemeral);
     }
 }
diff --git a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
index 476bad7..243af19 100644
--- a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
+++ b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java
@@ -45,6 +45,7 @@
     private final String tableName;
     private final UUID tableId;
     private final String tag;
+    private final boolean ephemeral;
 
     private final Instant createdAt;
     private final Instant expiresAt;
@@ -53,7 +54,7 @@
 
     public TableSnapshot(String keyspaceName, String tableName, UUID tableId,
                          String tag, Instant createdAt, Instant expiresAt,
-                         Set<File> snapshotDirs)
+                         Set<File> snapshotDirs, boolean ephemeral)
     {
         this.keyspaceName = keyspaceName;
         this.tableName = tableName;
@@ -62,6 +63,7 @@
         this.createdAt = createdAt;
         this.expiresAt = expiresAt;
         this.snapshotDirs = snapshotDirs;
+        this.ephemeral = ephemeral;
     }
 
     /**
@@ -124,6 +126,11 @@
         return snapshotDirs.stream().anyMatch(File::exists);
     }
 
+    public boolean isEphemeral()
+    {
+        return ephemeral;
+    }
+
     public boolean isExpiring()
     {
         return expiresAt != null;
@@ -193,13 +200,13 @@
         return Objects.equals(keyspaceName, snapshot.keyspaceName) && Objects.equals(tableName, snapshot.tableName) &&
                Objects.equals(tableId, snapshot.tableId) && Objects.equals(tag, snapshot.tag) &&
                Objects.equals(createdAt, snapshot.createdAt) && Objects.equals(expiresAt, snapshot.expiresAt) &&
-               Objects.equals(snapshotDirs, snapshot.snapshotDirs);
+               Objects.equals(snapshotDirs, snapshot.snapshotDirs) && Objects.equals(ephemeral, snapshot.ephemeral);
     }
 
     @Override
     public int hashCode()
     {
-        return Objects.hash(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs);
+        return Objects.hash(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs, ephemeral);
     }
 
     @Override
@@ -213,6 +220,7 @@
                ", createdAt=" + createdAt +
                ", expiresAt=" + expiresAt +
                ", snapshotDirs=" + snapshotDirs +
+               ", ephemeral=" + ephemeral +
                '}';
     }
 
@@ -224,6 +232,7 @@
 
         private Instant createdAt = null;
         private Instant expiresAt = null;
+        private boolean ephemeral;
 
         private final Set<File> snapshotDirs = new HashSet<>();
 
@@ -239,12 +248,17 @@
         {
             snapshotDirs.add(snapshotDir);
             File manifestFile = new File(snapshotDir, "manifest.json");
-            if (manifestFile.exists() && createdAt == null && expiresAt == null) {
-                loadTimestampsFromManifest(manifestFile);
-            }
+            if (manifestFile.exists() && createdAt == null && expiresAt == null)
+                loadMetadataFromManifest(manifestFile);
+
+            // check if an ephemeral marker file exists only in case it is not already ephemeral
+            // by reading it from manifest
+            // TODO remove this on Cassandra 4.3 release, see CASSANDRA-16911
+            if (!ephemeral && new File(snapshotDir, "ephemeral.snapshot").exists())
+                ephemeral = true;
         }
 
-        private void loadTimestampsFromManifest(File manifestFile)
+        private void loadMetadataFromManifest(File manifestFile)
         {
             try
             {
@@ -252,6 +266,9 @@
                 SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile);
                 createdAt = manifest.createdAt;
                 expiresAt = manifest.expiresAt;
+                // a snapshot may be ephemeral when it has a marker file (old way) or flag in manifest (new way)
+                if (!ephemeral)
+                    ephemeral = manifest.ephemeral;
             }
             catch (IOException e)
             {
@@ -261,7 +278,7 @@
 
         TableSnapshot build()
         {
-            return new TableSnapshot(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs);
+            return new TableSnapshot(keyspaceName, tableName, tableId, tag, createdAt, expiresAt, snapshotDirs, ephemeral);
         }
     }
 
diff --git a/src/java/org/apache/cassandra/streaming/StreamSession.java b/src/java/org/apache/cassandra/streaming/StreamSession.java
index 2da7021..acd7f3a 100644
--- a/src/java/org/apache/cassandra/streaming/StreamSession.java
+++ b/src/java/org/apache/cassandra/streaming/StreamSession.java
@@ -1124,7 +1124,7 @@
             logger.debug("[Stream #{}] Stream session with peer {} is already in a final state on abort.", planId(), peer);
             return;
         }
-
+            
         logger.info("[Stream #{}] Aborting stream session with peer {}...", planId(), peer);
 
         if (channel.connected())
diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java
index 811df7a..a3a296b 100644
--- a/src/java/org/apache/cassandra/tools/BulkLoader.java
+++ b/src/java/org/apache/cassandra/tools/BulkLoader.java
@@ -66,7 +66,8 @@
                         buildSSLOptions(options.clientEncOptions)),
                         handler,
                         options.connectionsPerHost,
-                        options.targetKeyspace);
+                        options.targetKeyspace,
+                        options.targetTable);
         DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(options.throttle);
         DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(options.interDcThrottle);
         StreamResultFuture future = null;
@@ -82,7 +83,6 @@
             {
                 future = loader.stream(options.ignores, indicator);
             }
-
         }
         catch (Exception e)
         {
diff --git a/src/java/org/apache/cassandra/tools/LoaderOptions.java b/src/java/org/apache/cassandra/tools/LoaderOptions.java
index d882e5a..27d54a7 100644
--- a/src/java/org/apache/cassandra/tools/LoaderOptions.java
+++ b/src/java/org/apache/cassandra/tools/LoaderOptions.java
@@ -66,6 +66,7 @@
     public static final String ENTIRE_SSTABLE_INTER_DC_THROTTLE_MBITS = "entire-sstable-inter-dc-throttle";
     public static final String TOOL_NAME = "sstableloader";
     public static final String TARGET_KEYSPACE = "target-keyspace";
+    public static final String TARGET_TABLE = "target-table";
 
     /* client encryption options */
     public static final String SSL_TRUSTSTORE = "truststore";
@@ -97,6 +98,7 @@
     public final Set<InetSocketAddress> hosts;
     public final Set<InetAddressAndPort> ignores;
     public final String targetKeyspace;
+    public final String targetTable;
 
     LoaderOptions(Builder builder)
     {
@@ -120,6 +122,7 @@
         hosts = builder.hosts;
         ignores = builder.ignores;
         targetKeyspace = builder.targetKeyspace;
+        targetTable = builder.targetTable;
     }
 
     static class Builder
@@ -147,6 +150,7 @@
         Set<InetSocketAddress> hosts = new HashSet<>();
         Set<InetAddressAndPort> ignores = new HashSet<>();
         String targetKeyspace;
+        String targetTable;
 
         Builder()
         {
@@ -328,6 +332,18 @@
             return this;
         }
 
+        public Builder targetKeyspace(String keyspace)
+        {
+            this.targetKeyspace = keyspace;
+            return this;
+        }
+
+        public Builder targetTable(String table)
+        {
+            this.targetTable = table;
+            return this;
+        }
+
         public Builder parseArgs(String cmdArgs[])
         {
             CommandLineParser parser = new GnuParser();
@@ -566,10 +582,16 @@
                 {
                     targetKeyspace = cmd.getOptionValue(TARGET_KEYSPACE);
                     if (StringUtils.isBlank(targetKeyspace))
-                    {
                         errorMsg("Empty keyspace is not supported.", options);
-                    }
                 }
+
+                if (cmd.hasOption(TARGET_TABLE))
+                {
+                    targetTable = cmd.getOptionValue(TARGET_TABLE);
+                    if (StringUtils.isBlank(targetTable))
+                        errorMsg("Empty table is not supported.", options);
+                }
+
                 return this;
             }
             catch (ParseException | ConfigurationException | MalformedURLException e)
@@ -678,6 +700,7 @@
         options.addOption("ciphers", SSL_CIPHER_SUITES, "CIPHER-SUITES", "Client SSL: comma-separated list of encryption suites to use");
         options.addOption("f", CONFIG_PATH, "path to config file", "cassandra.yaml file path for streaming throughput and client/server SSL.");
         options.addOption("k", TARGET_KEYSPACE, "target keyspace name", "target keyspace name");
+        options.addOption("tb", TARGET_TABLE, "target table name", "target table name");
         return options;
     }
 
diff --git a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
index b70a7a9..803fe5a 100644
--- a/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
+++ b/src/java/org/apache/cassandra/tools/nodetool/ListSnapshots.java
@@ -40,6 +40,11 @@
     description = "Skip snapshots with TTL")
     private boolean noTTL = false;
 
+    @Option(title = "ephemeral",
+    name = { "-e", "--ephemeral" },
+    description = "Include ephemeral snapshots")
+    private boolean includeEphemeral = false;
+
     @Override
     public void execute(NodeProbe probe)
     {
@@ -50,6 +55,7 @@
 
             Map<String, String> options = new HashMap<>();
             options.put("no_ttl", Boolean.toString(noTTL));
+            options.put("include_ephemeral", Boolean.toString(includeEphemeral));
 
             final Map<String, TabularData> snapshotDetails = probe.getSnapshotDetails(options);
             if (snapshotDetails.isEmpty())
@@ -62,7 +68,11 @@
             TableBuilder table = new TableBuilder();
             // display column names only once
             final List<String> indexNames = snapshotDetails.entrySet().iterator().next().getValue().getTabularType().getIndexNames();
-            table.add(indexNames.toArray(new String[indexNames.size()]));
+
+            if (includeEphemeral)
+                table.add(indexNames.toArray(new String[indexNames.size()]));
+            else
+                table.add(indexNames.subList(0, indexNames.size() - 1).toArray(new String[indexNames.size() - 1]));
 
             for (final Map.Entry<String, TabularData> snapshotDetail : snapshotDetails.entrySet())
             {
@@ -70,12 +80,15 @@
                 for (Object eachValue : values)
                 {
                     final List<?> value = (List<?>) eachValue;
-                    table.add(value.toArray(new String[value.size()]));
+                    if (includeEphemeral)
+                        table.add(value.toArray(new String[value.size()]));
+                    else
+                        table.add(value.subList(0, value.size() - 1).toArray(new String[value.size() - 1]));
                 }
             }
             table.printTo(out);
 
-            out.println("\nTotal TrueDiskSpaceUsed: " + FileUtils.stringifyFileSize(trueSnapshotsSize) + "\n");
+            out.println("\nTotal TrueDiskSpaceUsed: " + FileUtils.stringifyFileSize(trueSnapshotsSize) + '\n');
         }
         catch (Exception e)
         {
diff --git a/src/java/org/apache/cassandra/transport/Dispatcher.java b/src/java/org/apache/cassandra/transport/Dispatcher.java
index da79c3d..8f8a607 100644
--- a/src/java/org/apache/cassandra/transport/Dispatcher.java
+++ b/src/java/org/apache/cassandra/transport/Dispatcher.java
@@ -24,15 +24,16 @@
 import java.util.function.Consumer;
 
 import com.google.common.base.Predicate;
-import org.apache.cassandra.metrics.ClientMetrics;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import io.netty.channel.Channel;
 import io.netty.channel.EventLoop;
 import io.netty.util.AttributeKey;
+import org.apache.cassandra.concurrent.DebuggableTask.RunnableDebuggableTask;
 import org.apache.cassandra.concurrent.LocalAwareExecutorPlus;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.metrics.ClientMetrics;
 import org.apache.cassandra.net.FrameEncoder;
 import org.apache.cassandra.service.ClientWarn;
 import org.apache.cassandra.service.QueryState;
@@ -42,10 +43,10 @@
 import org.apache.cassandra.transport.messages.ErrorMessage;
 import org.apache.cassandra.transport.messages.EventMessage;
 import org.apache.cassandra.utils.JVMStabilityInspector;
+import org.apache.cassandra.utils.MonotonicClock;
 import org.apache.cassandra.utils.NoSpamLogger;
 
 import static org.apache.cassandra.concurrent.SharedExecutorPool.SHARED;
-import static org.apache.cassandra.utils.Clock.Global.nanoTime;
 
 public class Dispatcher
 {
@@ -79,17 +80,60 @@
 
     public void dispatch(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure)
     {
-        requestExecutor.submit(() -> processRequest(channel, request, forFlusher, backpressure));
+        requestExecutor.submit(new RequestProcessor(channel, request, forFlusher, backpressure));
         ClientMetrics.instance.markRequestDispatched();
     }
 
+    public class RequestProcessor implements RunnableDebuggableTask
+    {
+        private final Channel channel;
+        private final Message.Request request;
+        private final FlushItemConverter forFlusher;
+        private final Overload backpressure;
+        
+        private final long approxCreationTimeNanos = MonotonicClock.Global.approxTime.now();
+        private volatile long approxStartTimeNanos;
+        
+        public RequestProcessor(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure)
+        {
+            this.channel = channel;
+            this.request = request;
+            this.forFlusher = forFlusher;
+            this.backpressure = backpressure;
+        }
+
+        @Override
+        public void run()
+        {
+            approxStartTimeNanos = MonotonicClock.Global.approxTime.now();
+            processRequest(channel, request, forFlusher, backpressure, approxStartTimeNanos);
+        }
+
+        @Override
+        public long creationTimeNanos()
+        {
+            return approxCreationTimeNanos;
+        }
+
+        @Override
+        public long startTimeNanos()
+        {
+            return approxStartTimeNanos;
+        }
+
+        @Override
+        public String description()
+        {
+            return request.toString();
+        }
+    }
+
     /**
      * Note: this method may be executed on the netty event loop, during initial protocol negotiation; the caller is
      * responsible for cleaning up any global or thread-local state. (ex. tracing, client warnings, etc.).
      */
-    private static Message.Response processRequest(ServerConnection connection, Message.Request request, Overload backpressure)
+    private static Message.Response processRequest(ServerConnection connection, Message.Request request, Overload backpressure, long startTimeNanos)
     {
-        long queryStartNanoTime = nanoTime();
         if (connection.getVersion().isGreaterOrEqualTo(ProtocolVersion.V4))
             ClientWarn.instance.captureWarnings();
 
@@ -119,7 +163,7 @@
 
         Message.logger.trace("Received: {}, v={}", request, connection.getVersion());
         connection.requests.inc();
-        Message.Response response = request.execute(qstate, queryStartNanoTime);
+        Message.Response response = request.execute(qstate, startTimeNanos);
 
         if (request.isTrackable())
             CoordinatorWarnings.done();
@@ -130,15 +174,15 @@
         connection.applyStateTransition(request.type, response.type);
         return response;
     }
-
+    
     /**
      * Note: this method may be executed on the netty event loop.
      */
-    static Message.Response processRequest(Channel channel, Message.Request request, Overload backpressure)
+    static Message.Response processRequest(Channel channel, Message.Request request, Overload backpressure, long approxStartTimeNanos)
     {
         try
         {
-            return processRequest((ServerConnection) request.connection(), request, backpressure);
+            return processRequest((ServerConnection) request.connection(), request, backpressure, approxStartTimeNanos);
         }
         catch (Throwable t)
         {
@@ -163,9 +207,9 @@
     /**
      * Note: this method is not expected to execute on the netty event loop.
      */
-    void processRequest(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure)
+    void processRequest(Channel channel, Message.Request request, FlushItemConverter forFlusher, Overload backpressure, long approxStartTimeNanos)
     {
-        Message.Response response = processRequest(channel, request, backpressure);
+        Message.Response response = processRequest(channel, request, backpressure, approxStartTimeNanos);
         FlushItem<?> toFlush = forFlusher.toFlushItem(channel, request, response);
         Message.logger.trace("Responding: {}, v={}", response, request.connection().getVersion());
         flush(toFlush);
@@ -201,7 +245,7 @@
      * for delivering events to registered clients is dependent on protocol version and the configuration
      * of the pipeline. For v5 and newer connections, the event message is encoded into an Envelope,
      * wrapped in a FlushItem and then delivered via the pipeline's flusher, in a similar way to
-     * a Response returned from {@link #processRequest(Channel, Message.Request, FlushItemConverter, Overload)}.
+     * a Response returned from {@link #processRequest(Channel, Message.Request, FlushItemConverter, Overload, long)}.
      * It's worth noting that events are not generally fired as a direct response to a client request,
      * so this flush item has a null request attribute. The dispatcher itself is created when the
      * pipeline is first configured during protocol negotiation and is attached to the channel for
diff --git a/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java b/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
index 75cb72e..e4cff99 100644
--- a/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
+++ b/src/java/org/apache/cassandra/transport/InitialConnectionHandler.java
@@ -26,6 +26,7 @@
 import java.util.Map;
 
 import org.apache.cassandra.transport.ClientResourceLimits.Overload;
+import org.apache.cassandra.utils.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -148,7 +149,9 @@
                         promise = new VoidChannelPromise(ctx.channel(), false);
                     }
 
-                    final Message.Response response = Dispatcher.processRequest(ctx.channel(), startup, Overload.NONE);
+                    long approxStartTimeNanos = MonotonicClock.Global.approxTime.now();
+                    final Message.Response response = Dispatcher.processRequest(ctx.channel(), startup, Overload.NONE, approxStartTimeNanos);
+
                     outbound = response.encode(inbound.header.version);
                     ctx.writeAndFlush(outbound, promise);
                     logger.trace("Configured pipeline: {}", ctx.pipeline());
diff --git a/src/java/org/apache/cassandra/transport/Message.java b/src/java/org/apache/cassandra/transport/Message.java
index 75c997e..2c91a76 100644
--- a/src/java/org/apache/cassandra/transport/Message.java
+++ b/src/java/org/apache/cassandra/transport/Message.java
@@ -193,7 +193,8 @@
         this.customPayload = customPayload;
     }
 
-    public String debugString()
+    @Override
+    public String toString()
     {
         return String.format("(%s:%s:%s)", type, streamId, connection == null ? "null" :  connection.getVersion().asInt());
     }
diff --git a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
index 9a296e4..c295216 100644
--- a/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
+++ b/src/java/org/apache/cassandra/transport/messages/QueryMessage.java
@@ -148,6 +148,7 @@
     @Override
     public String toString()
     {
-        return String.format("QUERY %s [pageSize = %d]", query, options.getPageSize());
+        return String.format("QUERY %s [pageSize = %d] at consistency %s", 
+                             query, options.getPageSize(), options.getConsistency());
     }
 }
diff --git a/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.java b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.java
new file mode 100644
index 0000000..4bccb40
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Interface indicating a value can be represented/identified by a comparable {@link ByteSource}.
+ *
+ * All Cassandra types that can be used as part of a primary key have a corresponding byte-comparable translation,
+ * detailed in ByteComparable.md. Byte-comparable representations are used in some memtable as well as primary and
+ * secondary index implementations.
+ */
+public interface ByteComparable
+{
+    /**
+     * Returns a source that generates the byte-comparable representation of the value byte by byte.
+     */
+    ByteSource asComparableBytes(Version version);
+
+    enum Version
+    {
+        LEGACY, // Encoding used in legacy sstable format; forward (value to byte-comparable) translation only
+        OSS42,  // CASSANDRA 4.2 encoding
+    }
+
+    ByteComparable EMPTY = (Version version) -> ByteSource.EMPTY;
+
+    /**
+     * Construct a human-readable string from the byte-comparable representation. Used for debugging.
+     */
+    default String byteComparableAsString(Version version)
+    {
+        StringBuilder builder = new StringBuilder();
+        ByteSource stream = asComparableBytes(version);
+        if (stream == null)
+            return "null";
+        for (int b = stream.next(); b != ByteSource.END_OF_STREAM; b = stream.next())
+            builder.append(Integer.toHexString((b >> 4) & 0xF)).append(Integer.toHexString(b & 0xF));
+        return builder.toString();
+    }
+
+    // Simple factories used for testing
+
+    static ByteComparable of(String s)
+    {
+        return v -> ByteSource.of(s, v);
+    }
+
+    static ByteComparable of(long value)
+    {
+        return v -> ByteSource.of(value);
+    }
+
+    static ByteComparable of(int value)
+    {
+        return v -> ByteSource.of(value);
+    }
+
+    static ByteComparable fixedLength(ByteBuffer bytes)
+    {
+        return v -> ByteSource.fixedLength(bytes);
+    }
+
+    static ByteComparable fixedLength(byte[] bytes)
+    {
+        return v -> ByteSource.fixedLength(bytes);
+    }
+
+    /**
+     * Returns a separator for two byte sources, i.e. something that is definitely > prevMax, and <= currMin, assuming
+     * prevMax < currMin.
+     * This returns the shortest prefix of currMin that is greater than prevMax.
+     */
+    static ByteComparable separatorPrefix(ByteComparable prevMax, ByteComparable currMin)
+    {
+        return version -> ByteSource.separatorPrefix(prevMax.asComparableBytes(version), currMin.asComparableBytes(version));
+    }
+
+    /**
+     * Returns a separator for two byte comparable, i.e. something that is definitely > prevMax, and <= currMin, assuming
+     * prevMax < currMin.
+     * This is a stream of length 1 longer than the common prefix of the two streams, with last byte one higher than the
+     * prevMax stream.
+     */
+    static ByteComparable separatorGt(ByteComparable prevMax, ByteComparable currMin)
+    {
+        return version -> ByteSource.separatorGt(prevMax.asComparableBytes(version), currMin.asComparableBytes(version));
+    }
+
+    static ByteComparable cut(ByteComparable src, int cutoff)
+    {
+        return version -> ByteSource.cut(src.asComparableBytes(version), cutoff);
+    }
+
+    /**
+     * Return the length of a byte comparable, not including the terminator byte.
+     */
+    static int length(ByteComparable src, Version version)
+    {
+        int l = 0;
+        ByteSource s = src.asComparableBytes(version);
+        while (s.next() != ByteSource.END_OF_STREAM)
+            ++l;
+        return l;
+    }
+
+    /**
+     * Compare two byte-comparable values by their byte-comparable representation. Used for tests.
+     *
+     * @return the result of the lexicographic unsigned byte comparison of the byte-comparable representations of the
+     *         two arguments
+     */
+    static int compare(ByteComparable bytes1, ByteComparable bytes2, Version version)
+    {
+        ByteSource s1 = bytes1.asComparableBytes(version);
+        ByteSource s2 = bytes2.asComparableBytes(version);
+
+        if (s1 == null || s2 == null)
+            return Boolean.compare(s1 != null, s2 != null);
+
+        while (true)
+        {
+            int b1 = s1.next();
+            int b2 = s2.next();
+            int cmp = Integer.compare(b1, b2);
+            if (cmp != 0)
+                return cmp;
+            if (b1 == ByteSource.END_OF_STREAM)
+                return 0;
+        }
+    }
+
+    /**
+     * Returns the length of the minimum prefix that differentiates the two given byte-comparable representations.
+     */
+    static int diffPoint(ByteComparable bytes1, ByteComparable bytes2, Version version)
+    {
+        ByteSource s1 = bytes1.asComparableBytes(version);
+        ByteSource s2 = bytes2.asComparableBytes(version);
+        int pos = 1;
+        int b;
+        while ((b = s1.next()) == s2.next() && b != ByteSource.END_OF_STREAM)
+            ++pos;
+        return pos;
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md
new file mode 100644
index 0000000..f360635
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/bytecomparable/ByteComparable.md
@@ -0,0 +1,693 @@
+<!---
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+ 
+     http://www.apache.org/licenses/LICENSE-2.0
+ 
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+# Byte-comparable translation of types (ByteComparable/ByteSource)
+
+## Problem / Motivation
+
+Cassandra has a very heavy reliance on comparisons — they are used throughout read and write paths, coordination,
+compaction, etc. to be able to order and merge results. It also supports a range of types which often require the 
+compared object to be completely in memory to order correctly, which in turn has necessitated interfaces where 
+comparisons can only be applied if the compared objects are completely loaded.
+
+This has some rather painful implications on the performance of the database, both in terms of the time it takes to load,
+compare and garbage collect, as well as in terms of the space required to hold complete keys in on-disk indices and
+deserialized versions in in-memory data structures. In addition to this, the reliance on comparisons forces Cassandra to
+use only comparison-based structures, which aren’t the most efficient.
+
+There is no way to escape the need to compare and order objects in Cassandra, but the machinery for doing this can be
+done much more smartly if we impose some simple structure in the objects we deal with — byte ordering.
+
+The term “byte order” as used in this document refers to the property of being ordered via lexicographic compare on the
+unsigned values of the byte contents. Some of the types in Cassandra already have this property (e.g. strings, blobs),
+but some of the most heavily used ones (e.g. integers, uuids) don’t.
+
+When byte order is universally available for the types used for keys, several key advantages can be put to use:
+- Comparisons can be done using a single simple method, core machinery doesn’t need to know anything about types.
+- Prefix differences are enough to define order; unique prefixes can be used instead of complete keys.
+- Tries can be used to store, query and iterate over ranges of keys, providing fast lookup and prefix compression.
+- Merging can be performed by merging tries, significantly reducing the number of necessary comparisons.
+
+## Ordering the types
+
+As we want to keep all existing functionality in Cassandra, we need to be able to deal with existing
+non-byte-order-comparable types. This requires some form of conversion of each value to a sequence of bytes that can be 
+byte-order compared (also called "byte-comparable"), as well as the inverse conversion from byte-comparable to value.
+
+As one of the main advantages of byte order is the ability to decide comparisons early, without having to read the whole
+of the input sequence, byte-ordered interpretations of values are represented as sources of bytes with unknown length, 
+using the interface `ByteSource`. The interface declares one method, `next()` which produces the next byte of the
+stream, or `ByteSource.END_OF_STREAM` if the stream is exhausted.
+
+`END_OF_STREAM` is chosen as `-1` (`(int) -1`, which is outside the range of possible byte values), to make comparing 
+two byte sources as trivial (and thus fast) as possible.
+  
+To be able to completely abstract type information away from the storage machinery, we also flatten complex types into
+single byte sequences. To do this, we add separator bytes in front, between components, and at the end and do some 
+encoding of variable-length sequences.
+
+The other interface provided by this package `ByteComparable`, is an entity whose byte-ordered interpretation can be
+requested. The interface is implemented by `DecoratedKey`, and can be requested for clustering keys and bounds using
+`ClusteringComparator.asByteComparable`. The inverse translation is provided by 
+`Buffer/NativeDecoratedKey.fromByteComparable` and `ClusteringComparator.clustering/bound/boundaryFromByteComparable`.
+
+The (rather technical) paragraphs below detail the encoding we have chosen for the various types. For simplicity we
+only discuss the bidirectional `OSS42` version of the translation. The implementations in code of the various mappings
+are in the relevant `AbstractType` subclass.
+
+### Desired properties
+
+Generally, we desire the following two properties from the byte-ordered translations of values we use in the database:
+- Comparison equivalence (1):  
+    <math xmlns="http://www.w3.org/1998/Math/MathML">
+      <semantics>
+        <mstyle displaystyle="true">
+          <mo>&#x2200;</mo>
+          <mi>x</mi>
+          <mo>,</mo>
+          <mi>y</mi>
+          <mo>&#x2208;</mo>
+          <mi>T</mi>
+          <mo>,</mo>
+          <mrow>
+            <mtext>compareBytesUnsigned</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>T</mi>
+            <mo>.</mo>
+            <mrow>
+              <mtext>byteOrdered</mtext>
+            </mrow>
+            <mrow>
+              <mo>(</mo>
+              <mi>x</mi>
+              <mo>)</mo>
+            </mrow>
+            <mo>,</mo>
+            <mi>T</mi>
+            <mo>.</mo>
+            <mrow>
+              <mtext>byteOrdered</mtext>
+            </mrow>
+            <mrow>
+              <mo>(</mo>
+              <mi>y</mi>
+              <mo>)</mo>
+            </mrow>
+            <mo>)</mo>
+          </mrow>
+          <mo>=</mo>
+          <mi>T</mi>
+          <mo>.</mo>
+          <mrow>
+            <mtext>compare</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>x</mi>
+            <mo>,</mo>
+            <mi>y</mi>
+            <mo>)</mo>
+          </mrow>
+        </mstyle>
+        <!-- <annotation encoding="text/x-asciimath">forall x,y in T, "compareBytesUnsigned"(T."byteOrdered"(x), T."byteOrdered"(y))=T."compare"(x, y)</annotation> -->
+      </semantics>
+    </math>
+- Prefix-freedom (2):  
+    <math xmlns="http://www.w3.org/1998/Math/MathML">
+      <semantics>
+        <mstyle displaystyle="true">
+          <mo>&#x2200;</mo>
+          <mi>x</mi>
+          <mo>,</mo>
+          <mi>y</mi>
+          <mo>&#x2208;</mo>
+          <mi>T</mi>
+          <mo>,</mo>
+          <mi>T</mi>
+          <mo>.</mo>
+          <mrow>
+            <mtext>byteOrdered</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>x</mi>
+            <mo>)</mo>
+          </mrow>
+          <mrow>
+            <mspace width="1ex" />
+            <mtext> is not a prefix of </mtext>
+            <mspace width="1ex" />
+          </mrow>
+          <mi>T</mi>
+          <mo>.</mo>
+          <mrow>
+            <mtext>byteOrdered</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>y</mi>
+            <mo>)</mo>
+          </mrow>
+        </mstyle>
+        <!-- <annotation encoding="text/x-asciimath">forall x,y in T, T."byteOrdered"(x) " is not a prefix of " T."byteOrdered"(y)</annotation> -->
+      </semantics>
+    </math>
+   
+The former is the essential requirement, and the latter allows construction of encodings of sequences of multiple
+values, as well as a little more efficiency in the data structures.
+
+To more efficiently encode byte-ordered blobs, however, we use a slightly tweaked version of the above requirements:
+
+- Comparison equivalence (3):  
+    <math xmlns="http://www.w3.org/1998/Math/MathML">
+      <semantics>
+        <mstyle displaystyle="true">
+          <mo>&#x2200;</mo>
+          <mi>x</mi>
+          <mo>,</mo>
+          <mi>y</mi>
+          <mo>&#x2208;</mo>
+          <mi>T</mi>
+          <mo>,</mo>
+          <mo>&#x2200;</mo>
+          <msub>
+            <mi>b</mi>
+            <mn>1</mn>
+          </msub>
+          <mo>,</mo>
+          <msub>
+            <mi>b</mi>
+            <mn>2</mn>
+          </msub>
+          <mo>&#x2208;</mo>
+          <mrow>
+            <mo>[</mo>
+            <mn>0x10</mn>
+            <mo>-</mo>
+            <mn>0xEF</mn>
+            <mo>]</mo>
+          </mrow>
+          <mo>,</mo>
+            <mtext><br/></mtext>
+          <mrow>
+            <mtext>compareBytesUnsigned</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>T</mi>
+            <mo>.</mo>
+            <mrow>
+              <mtext>byteOrdered</mtext>
+            </mrow>
+            <mrow>
+              <mo>(</mo>
+              <mi>x</mi>
+              <mo>)</mo>
+            </mrow>
+            <mo>+</mo>
+            <msub>
+              <mi>b</mi>
+              <mn>1</mn>
+            </msub>
+            <mo>,</mo>
+            <mi>T</mi>
+            <mo>.</mo>
+            <mrow>
+              <mtext>byteOrdered</mtext>
+            </mrow>
+            <mrow>
+              <mo>(</mo>
+              <mi>y</mi>
+              <mo>)</mo>
+            </mrow>
+            <mo>+</mo>
+            <msub>
+              <mi>b</mi>
+              <mn>2</mn>
+            </msub>
+            <mo>)</mo>
+          </mrow>
+          <mo>=</mo>
+          <mi>T</mi>
+          <mo>.</mo>
+          <mrow>
+            <mtext>compare</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>x</mi>
+            <mo>,</mo>
+            <mi>y</mi>
+            <mo>)</mo>
+          </mrow>
+        </mstyle>
+        <!-- <annotation encoding="text/x-asciimath">forall x,y in T, forall b_1, b_2 in [0x10-0xEF],
+    "compareBytesUnsigned"(T."byteOrdered"(x)+b_1, T."byteOrdered"(y)+b_2)=T."compare"(x, y)</annotation> -->
+      </semantics>
+    </math>
+- Weak prefix-freedom (4):  
+    <math xmlns="http://www.w3.org/1998/Math/MathML">
+      <semantics>
+        <mstyle displaystyle="true">
+          <mo>&#x2200;</mo>
+          <mi>x</mi>
+          <mo>,</mo>
+          <mi>y</mi>
+          <mo>&#x2208;</mo>
+          <mi>T</mi>
+          <mo>,</mo>
+          <mo>&#x2200;</mo>
+          <mi>b</mi>
+          <mo>&#x2208;</mo>
+          <mrow>
+            <mo>[</mo>
+            <mn>0x10</mn>
+            <mo>-</mo>
+            <mn>0xEF</mn>
+            <mo>]</mo>
+          </mrow>
+          <mo>,</mo>
+            <mtext><br/></mtext>
+          <mrow>
+            <mo>(</mo>
+            <mi>T</mi>
+            <mo>.</mo>
+            <mrow>
+              <mtext>byteOrdered</mtext>
+            </mrow>
+            <mrow>
+              <mo>(</mo>
+              <mi>x</mi>
+              <mo>)</mo>
+            </mrow>
+            <mo>+</mo>
+            <mi>b</mi>
+            <mo>)</mo>
+          </mrow>
+          <mrow>
+            <mspace width="1ex" />
+            <mtext> is not a prefix of </mtext>
+            <mspace width="1ex" />
+          </mrow>
+          <mi>T</mi>
+          <mo>.</mo>
+          <mrow>
+            <mtext>byteOrdered</mtext>
+          </mrow>
+          <mrow>
+            <mo>(</mo>
+            <mi>y</mi>
+            <mo>)</mo>
+          </mrow>
+        </mstyle>
+        <!-- <annotation encoding="text/x-asciimath">forall x,y in T, forall b in [0x10-0xEF],
+    (T."byteOrdered"(x)+b) " is not a prefix of " T."byteOrdered"(y)</annotation> -->
+      </semantics>
+    </math>
+
+These versions allow the addition of a separator byte after each value, and guarantee that the combination with 
+separator fulfills the original requirements. (3) is somewhat stronger than (1) but is necessarily true if (2) is also 
+in force, while (4) trivially follows from (2).
+
+## Fixed length unsigned integers (Murmur token, date/time)
+
+This is the trivial case, as we can simply use the input bytes in big-endian order. The comparison result is the same, 
+and fixed length values are trivially prefix free, i.e. (1) and (2) are satisfied, and thus (3) and (4) follow from the
+observation above.
+
+## Fixed-length signed integers (byte, short, int, legacy bigint)
+
+As above, but we need to invert the sign bit of the number to put negative numbers before positives. This maps 
+`MIN_VALUE` to `0x00`..., `-1` to `0x7F…`, `0` to `0x80…`, and `MAX_VALUE` to `0xFF…`; comparing the resulting number 
+as an unsigned integer has the same effect as comparing the source signed.
+
+Examples:
+
+| Type and value | bytes                   |encodes as|
+|----------------|-------------------------|----------|
+| int 1          | 00 00 00 01             |             80 00 00 01
+| short -1       | FF FF                   |             7F FF
+| byte 0         | 00                      |             80
+| byte -2        | FE                      |             7E
+| int MAX_VALUE  | 7F FF FF FF             |             FF FF FF FF
+| long MIN_VALUE | 80 00 00 00 00 00 00 00 | 00 00 00 00 00 00 00 00
+
+## Variable-length encoding of integers (current bigint)
+
+Another way to encode integers that may save significant amounts of space when smaller numbers are often in use, but
+still permits large values to be efficiently encoded, is to use an encoding scheme similar to UTF-8.
+
+For unsigned numbers this can be done by starting the number with as many 1s in most significant bits as there are 
+additional bytes in the encoding, followed by a 0, and the bits of the number. Numbers between 0 and 127 are encoded
+in one byte, and each additional byte adds 7 more bits. Values that use all 8 bytes do not need a 9th bit of 0 and can
+thus fit in 9 bytes. Because longer numbers have more 1s in their MSBs, they compare 
+higher than shorter ones (and we always use the shortest representation). Because the length is specified through these
+initial bits, no value can be a prefix of another.
+
+| Value            | bytes                   |encodes as|
+|------------------|-------------------------|----------|
+| 0                | 00 00 00 00 00 00 00 00 |             00
+| 1                | 00 00 00 00 00 00 00 01 |             01
+| 127 (2^7-1)      | 00 00 00 00 00 00 00 7F |             7F
+| 128 (2^7)        | 00 00 00 00 00 00 00 80 |             80 80
+| 16383 (2^14 - 1) | 00 00 00 00 00 00 3F FF |             BF FF
+| 16384 (2^14)     | 00 00 00 00 00 00 40 00 |             C0 40 00
+| 2^31 - 1         | 00 00 00 00 7F FF FF FF |         F0 7F FF FF FF
+| 2^31             | 00 00 00 00 80 00 00 00 |         F0 80 00 00 00
+| 2^56 - 1         | 00 FF FF FF FF FF FF FF | FE FF FF FF FF FF FF FF
+| 2^56             | 01 00 00 00 00 00 00 00 | FF 01 00 00 00 00 00 00 00
+| 2^64 - 1        | FF FF FF FF FF FF FF FF | FF FF FF FF FF FF FF FF FF
+
+
+To encode signed numbers, we must start with the sign bit, and must also ensure that longer negative numbers sort 
+smaller than shorter ones. The first bit of the encoding is the inverted sign (i.e. 1 for positive, 0 for negative),
+followed by the length encoded as a sequence of bits that matches the inverted sign, followed by a bit that differs 
+(like above, not necessary for 9-byte encodings) and the bits of the number's two's complement.
+
+| Value             | bytes                    |encodes as|
+|-------------------|--------------------------|----------|
+| 1                 | 00 00 00 00 00 00 00 01  |             01
+| -1                | FF FF FF FF FF FF FF FF  |             7F
+| 0                 | 00 00 00 00 00 00 00 00  |             80
+| 63                | 00 00 00 00 00 00 00 3F  |             BF
+| -64               | FF FF FF FF FF FF FF C0  |             40
+| 64                | 00 00 00 00 00 00 00 40  |             C0 40
+| -65               | FF FF FF FF FF FF FF BF  |             3F BF
+| 8191              | 00 00 00 00 00 00 1F FF  | DF FF
+| 8192              | 00 00 00 00 00 00 20 00  | E0 20 00
+| Integer.MAX_VALUE | 00 00 00 00 7F FF FF FF  |             F8 7F FF FF FF
+| Long.MIN_VALUE    | 80 00 00 00 00 00 00 00  | 00 00 00 00 00 00 00 00 00
+
+
+## Fixed-size floating-point numbers (float, double)
+
+IEEE-754 was designed with byte-by-byte comparisons in mind, and provides an important guarantee about the bytes of a
+floating point number:  
+* If x and y are of the same sign, bytes(x) ≥ bytes(y) ⇔ |x| ≥ |y|.
+
+Thus, to be able to order floating point numbers as unsigned integers, we can:
+* Flip the sign bit so negatives are smaller than positive numbers.
+* If the number was negative, also flip all the other bits so larger magnitudes become smaller integers.
+
+This matches exactly the behaviour of `Double.compare`, which doesn’t fully agree with numerical comparisons (see spec) 
+in order to define a natural order over the floating point numbers.
+
+Examples:
+
+|Type and value|bytes|encodes as|
+|---|---|---|
+|float +1.0|            3F 80 00 00|               BF 80 00 00|
+|float +0.0|            00 00 00 00|               80 00 00 00|
+|float -0.0|            80 00 00 00|               7F FF FF FF|
+|float -1.0|            BF 80 00 00|               40 7F FF FF|
+|double +1.0|           3F F0 00 00 00 00 00 00|   BF F0 00 00 00 00 00 00|
+|double +Inf|           7F F0 00 00 00 00 00 00|   FF F0 00 00 00 00 00 00|
+|double -Inf|           FF F0 00 00 00 00 00 00|   00 0F FF FF FF FF FF FF|
+|double NaN|            7F F8 00 00 00 00 00 00|   FF F8 00 00 00 00 00 00|
+
+## UUIDs
+UUIDs are fixed-length unsigned integers, where the UUID version/type is compared first, and where bits need to be 
+reordered for the time UUIDs. To create a byte-ordered representation, we reorder the bytes: pull the version digit 
+first, then the rest of the digits, using the special time order if the version is equal to one.
+
+Examples:
+
+|Type and value|bytes|encodes as|
+|---|---|---|
+|Random (v4)|    cc520882-9507-44fb-8fc9-b349ecdee658 |    4cc52088295074fb8fc9b349ecdee658
+|Time (v1)  |    2a92d750-d8dc-11e6-a2de-cf8ecd4cf053 |    11e6d8dc2a92d750a2decf8ecd4cf053
+
+## Multi-component sequences (Partition or Clustering keys, tuples), bounds and nulls
+
+As mentioned above, we encode sequences by adding separator bytes in front, between components, and a terminator at the
+end. The values we chose for the separator and terminator are `0x40` and `0x38`, and they serve several purposes:
+- Permits partially specified bounds, with strict/exclusive or non-strict/inclusive semantics. This is done by finishing
+  a bound with a terminator value that is smaller/greater than the separator and terminator. We can use `0x20` for `<`/`≥`
+  and `0x60` for `≤`/`>`.
+- Permits encoding of `null` and `empty` values. We use `0x3E` as the separator for nulls and `0x3F` for empty, 
+  followed by no value bytes. This is always smaller than a sequence with non-null value for this component, but not 
+  smaller than a sequence that ends in this component.
+- Helps identify the ending of variable-length components (see below).
+
+Examples:
+
+|Types and values|bytes|encodes as|
+|---|---|---|
+|(short 1, float 1.0)    |    00 01, 3F 80 00 00    |   40·80 01·40·BF 80 00 00·38
+|(short -1, null)        |    FF FF, —              |   40·7F FF·3E·38
+|≥ (short 0, float -Inf) |    00 00, FF 80 00 00, >=|   40·80 00·40·00 7F FF FF·20
+|< (short MIN)           |    80 00, <              |   40·00 00·20
+|\> (null)               |                          |   3E·60
+|BOTTOM                  |                          |   20
+|TOP                     |                          |   60
+
+(The middle dot · doesn't exist in the encoding, it’s just a visualisation of the boundaries in the examples.)
+
+Since:
+- all separators in use are within `0x10`-`0xEF`, and
+- we use the same separator for internal components, with the exception of nulls which we encode with a smaller 
+  separator
+- the sequence has a fixed number of components or we use a different trailing value whenever it can be shorter
+
+the properties (3) and (4) guarantee that the byte comparison of the encoding goes in the same direction as the
+lexicographical comparison of the sequence. In combination with the third point above, (4) also ensures that no encoding 
+is a prefix of another. Since we have (1) and (2), (3) and (4) are also satisfied.
+
+Note that this means that the encodings of all partition and clustering keys used in the database will be prefix-free.
+
+## Variable-length byte comparables (ASCII, UTF-8 strings, blobs, InetAddress)
+
+In isolation, these can be compared directly without reinterpretation. However, once we place these inside a flattened
+sequence of values we need to clearly define the boundaries between values while maintaining order. To do this we use an
+end-of-value marker; since shorter values must be smaller than longer, this marker must be 0 and we need to find a way 
+to encode/escape actual 0s in the input sequence.
+
+The method we chose for this is the following:
+- If the input does not end on `00`, a `00` byte is appended at the end.
+- If the input contains a `00` byte, it is encoded as `00 FF`.
+- If the input contains a sequence of *n* `00` bytes, they are encoded as `00` `FE` (*n*-1 times) `FF`  
+  (so that we don’t double the size of `00` blobs).
+- If the input ends in `00`, the last `FF` is changed to `FE`  
+  (to ensure it’s smaller than the same value with `00` appended).
+
+Examples:
+
+|bytes/sequence|encodes as|
+|---|----|
+|22 00                |        22 00 FE
+|22 00 00 33          |        22 00 FE FF 33 00
+|22 00 11             |        22 00 FF 11 00
+|(blob 22, short 0)   |        40·22 00·40·80 00·40
+| ≥ (blob 22 00)      |        40·22 00 FE·20
+| ≤ (blob 22 00 00)   |        40·22 00 FE FE·60
+
+Within the encoding, a `00` byte can only be followed by a `FE` or `FF` byte, and hence if an encoding is a prefix of 
+another, the latter has to have a `FE` or `FF` as the next byte, which ensures both (4) (adding `10`-`EF` to the former 
+makes it no longer a prefix of the latter) and (3) (adding `10`-`EF` to the former makes it smaller than the latter; in
+this case the original value of the former is a prefix of the original value of the latter).
+
+## Variable-length integers (varint, RandomPartitioner token), legacy encoding
+
+If integers of unbounded length are guaranteed to start with a non-zero digit, to compare them we can first use a signed
+length, as numbers with longer representations have higher magnitudes. Only if the lengths match we need to compare the
+sequence of digits, which now has a known length.
+
+(Note: The meaning of “digit” here is not the same as “decimal digit”. We operate with numbers stored as bytes, thus it
+makes most sense to treat the numbers as encoded in base-256, where each digit is a byte.)
+
+This translates to the following encoding of varints:
+- Strip any leading zeros. Note that for negative numbers, `BigInteger` encodes leading 0 as `0xFF`.
+- If the length is 128 or greater, lead with a byte of `0xFF` (positive) or `0x00` (negative) for every 128 until there
+  are less than 128 left.
+- Encode the sign and (remaining) length of the number as a byte:
+  - `0x80 + (length - 1)` for positive numbers (so that greater magnitude is higher);
+  - `0x7F - (length - 1)` for negative numbers (so that greater magnitude is lower, and all negatives are lower than
+    positives).
+- Paste the bytes of the number, 2’s complement encoded for negative numbers (`BigInteger` already applies the 2’s
+  complement).
+
+Since when comparing two numbers we either have a difference in the length prefix, or the lengths are the same if we 
+need to compare the content bytes, there is no risk that a longer number can be confused with a shorter combined in a
+multi-component sequence. In other words, no value can be a prefix of another, thus we have (1) and (2) and thus (3) and (4)
+as well.
+
+Examples:
+
+|   value | bytes            |encodes as|
+|--------:|------------------|---|
+|       0 | 00               | 80·00
+|       1 | 01               | 80·01
+|      -1 | FF               | 7F·FF
+|     255 | 00 FF            | 80·FF
+|    -256 | FF 00            | 7F·00
+|     256 | 01 00            | 81·01 00
+|    2^16 | 01 00 00         | 82·01 00 00
+|   -2^32 | FF 00 00 00 00   | 7C·00 00 00 00
+|  2^1024 | 01 00(128 times) | FF 80·01 00(128 times)
+| -2^2048 | FF 00(256 times) | 00 00 80·00(256 times)
+
+(Middle dot · shows the transition point between length and digits.)
+
+## Variable-length integers, current encoding
+
+Because variable-length integers are also often used to store smaller range integers, it makes sense to also apply
+the variable-length integer encoding. Thus, the current varint scheme chooses to:
+- Strip any leading zeros. Note that for negative numbers, `BigInteger` encodes leading 0 as `0xFF`.
+- Map numbers directly to their [variable-length integer encoding](#variable-length-encoding-of-integers-current-bigint),
+  if they have 6 bytes or less.
+- Otherwise, encode as:
+  - a sign byte (00 for negative numbers, FF for positive, distinct from the leading byte of the variable-length 
+    encoding above)
+  - a variable-length encoded number of bytes adjusted by -7 (so that the smallest length this encoding uses maps to 
+    0), inverted for negative numbers (so that greater length compares smaller)
+  - the bytes of the number, two's complement encoded.
+
+We never use a longer encoding (e.g. using the second method if variable-length suffices, or with added 00 leading
+bytes) if a shorter one suffices.
+
+By the same reasoning as above, and the fact that the sign byte cannot be confused with a variable-length encoding 
+first byte, no value can be a prefix of another. As the sign byte compares smaller for negative (respectively bigger 
+for positive numbers) than any variable-length encoded integer, the comparison order is maintained when one number 
+uses variable-length encoding, and the other doesn't. Longer numbers compare smaller when negative (because of the 
+inverted length bytes), and bigger when positive.
+
+Examples:
+
+|    value | bytes                   |encodes as|
+|---------:|-------------------------|---|
+|        0 | 00                      | 80
+|        1 | 01                      | 81
+|       -1 | FF                      | 7F
+|      255 | 00 FF                   | C0 FF
+|     -256 | FF 00                   | 3F 00
+|      256 | 01 00                   | C1 00
+|     2^16 | 01 00 00                | E1 00 00
+|    -2^32 | FF 00 00 00 00          | 07 00 00 00 00
+|   2^56-1 | 00 FF FF FF FF FF FF FF | FE FF FF FF FF FF FF FF
+|    -2^56 | FF 00 00 00 00 00 00 00 | 01 00 00 00 00 00 00 00
+|     2^56 | 01 00 00 00 00 00 00 00 | FF·00·01 00 00 00 00 00 00 00
+| -2^56-1  | FE FF FF FF FF FF FF FF | 00·FF·FE FF FF FF FF FF FF FF
+|   2^1024 | 01 00(128 times)        | FF·7A·01 00(128 times)
+|  -2^2048 | FF 00(256 times)        | 00·7F 06·00(256 times)
+
+(Middle dot · shows the transition point between length and digits.)
+
+## Variable-length floating-point decimals (decimal)
+
+Variable-length floats are more complicated, but we can treat them similarly to IEEE-754 floating point numbers, by
+normalizing them by splitting them into sign, mantissa and signed exponent such that the mantissa is a number below 1 
+with a non-zero leading digit. We can then compare sign, exponent and mantissa in sequence (where the comparison of
+exponent and mantissa are with reversed meaning if the sign is negative) and that gives us the decimal ordering.
+
+A bit of extra care must be exercised when encoding decimals. Since fractions like `0.1` cannot be perfectly encoded in
+binary, decimals (and mantissas) cannot be encoded in binary or base-256 correctly. A decimal base must be used; since 
+we deal with bytes, it makes most sense to make things a little more efficient by using base-100. Floating-point 
+encoding and the comparison idea from the previous paragraph work in any number base.
+
+`BigDecimal` presents a further challenge, as it encodes decimals using a mixture of bases: numbers have a binary-
+encoded integer part and a decimal power-of-ten scale. The bytes produced by a `BigDecimal` are thus not suitable for 
+direct conversion to byte comparable and we must first instantiate the bytes as a `BigDecimal`, and then apply the 
+class’s methods to operate on it as a number.
+
+We then use the following encoding:
+- If the number is 0, the encoding is a single `0x80` byte.
+- Convert the input to signed mantissa and signed exponent in base-100. If the value is negative, invert the sign of the
+  exponent to form the "modulated exponent".
+- Output a byte encoding:
+  - the sign of the number encoded as `0x80` if positive and `0x00` if negative,
+  - the exponent length (stripping leading 0s) in bytes as `0x40 + modulated_exponent_length`, where the length is given
+    with the sign of the modulated exponent.
+- Output `exponent_length` bytes of modulated exponent, 2’s complement encoded so that negative values are correctly 
+  ordered.
+- Output `0x80 + leading signed byte of mantissa`, which is obtained by multiplying the mantissa by 100 and rounding to
+  -∞. The rounding is done so that the remainder of the mantissa becomes positive, and thus every new byte adds some 
+  value to it, making shorter sequences lower in value.
+- Update the mantissa to be the remainder after the rounding above. The result is guaranteed to be 0 or greater.
+- While the mantissa is non-zero, output `0x80 + leading byte` as above and update the mantissa to be the remainder.
+- Output `0x00`.
+
+As a description of how this produces the correct ordering, consider the result of comparison in the first differing 
+byte:
+- Difference in the first byte can be caused by:
+  - Difference in sign of the number or being zero, which yields the correct ordering because
+    - Negative numbers start with `0x3c` - `0x44`
+    - Zero starts with `0x80`
+    - Positive numbers start with `0xbc` - `0xc4`
+  - Difference in sign of the exponent modulated with the sign of the number. In a positive number negative exponents 
+    mean smaller values, while in a negative number it’s the opposite, thus the modulation with the number’s sign 
+    ensures the correct ordering. 
+  - Difference in modulated length of the exponent: again, since we gave the length a sign that is formed from both 
+    the sign of the exponent and the sign of the number, smaller numbers mean smaller exponent in the positive number 
+    case, and bigger exponent in the negative number case. In either case this provides the correct ordering.
+- Difference in one of the bytes of the modulated exponent (whose length and sign are now equal for both compared
+  numbers):
+  - Smaller byte means a smaller modulated exponent. In the positive case this means a smaller exponent, thus a smaller 
+    number. In the negative case this means the exponent is bigger, the absolute value of the number as well, and thus 
+    the number is smaller.
+- It is not possible for the difference to mix one number’s exponent with another’s mantissa (as such numbers would have
+  different leading bytes).
+- Difference in a mantissa byte present in both inputs:
+  - Smaller byte means smaller signed mantissa and hence smaller number when the exponents are equal.
+- One mantissa ending before another:
+  - This will result in the shorter being treated as smaller (since the trailing byte is `00`).
+  - Since all mantissas have at least one byte, this can’t happen in the leading mantissa byte.
+  - Thus the other number’s bytes from here on are not negative, and at least one of them must be non-zero, which means 
+    its mantissa is bigger and thus it encodes a bigger number.
+    
+Examples:
+
+|value|mexp|mantissa|mantissa in bytes|encodes as|
+|---:|---:|---|---|---|
+|1.1        | 1    | 0.0110 |.  01 10  |    C1·01·81 8A·00
+|1          | 1    | 0.01   |.  01     |    C1·01·81·00
+|0.01       | 0    | 0.01   |.  01     |    C0·81·00
+|0          |      |        |          |    80
+|-0.01      | 0    | -0.01  |. -01     |    40·81·00
+|-1         | -1   | -0.01  |. -01     |    3F·FF·7F·00
+|-1.1       | -1   | -0.0110|. -02 90  |    3F·FF·7E DA·00
+|-98.9      | -1   | -0.9890|. -99 10  |    3F·FF·1D 8A·00
+|-99        | -1   | -0.99  |. -99     |    3F·FF·1D·00
+|-99.9      | -1   | -0.9990|.-100 10  |    3F·FF·1C 8A·00
+|-8.1e2000  | -1001| -0.0810|. -09 90  |    3E·FC 17·77 DA·00
+|-8.1e-2000 | 999  | -0.0810|. -09 90  |    42·03 E7·77 DA·00
+|8.1e-2000  | -999 | 0.0810 |.  08 10  |    BE·FC 19·88 8A·00
+|8.1e2000   | 1001 | 0.0810 |.  08 10  |    C2·03 E9·88 8A·00
+(mexp stands for “modulated exponent”, i.e. exponent * sign)
+
+The values are prefix-free, because no exponent’s encoding can be a prefix of another, and the mantissas can never have
+a `00` byte at any place other than the last byte, and thus all (1)-(4) are satisfied.
+
+## Nulls and empty encodings
+
+Some types in Cassandra (e.g. numbers) admit null values that are represented as empty byte buffers. This is 
+distinct from null byte buffers, which can also appear in some cases. Particularly, null values in clustering 
+columns, when allowed by the type, are interpreted as empty byte buffers, encoded with the empty separator `0x3F`. 
+Unspecified clustering columns (at the end of a clustering specification), possible with `COMPACT STORAGE` or secondary 
+indexes, use the null separator `0x3E`.
+
+## Reversed types
+
+Reversing a type is straightforward: flip all bits of the encoded byte sequence. Since the source type encoding must
+satisfy (3) and (4), the flipped bits also do for the reversed comparator. (It is also true that if the source type 
+satisfies (1)-(2), the reversed will satisfy these too.)
+
+In a sequence we also must correct the empty encoding for a reversed type (since it must be greater than all values).
+Instead of `0x3F` we use `0x41` as the separator byte. Null encodings are not modified, as nulls compare smaller even
+in reversed types.
+
diff --git a/src/java/org/apache/cassandra/utils/bytecomparable/ByteSource.java b/src/java/org/apache/cassandra/utils/bytecomparable/ByteSource.java
new file mode 100644
index 0000000..be4cec0
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/bytecomparable/ByteSource.java
@@ -0,0 +1,853 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.cassandra.db.marshal.ValueAccessor;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+import org.apache.cassandra.utils.memory.MemoryUtil;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * A stream of bytes, used for byte-order-comparable representations of data, and utilities to convert various values
+ * to their byte-ordered translation.
+ * See ByteComparable.md for details about the encoding scheme.
+ */
+public interface ByteSource
+{
+    /** Consume the next byte, unsigned. Must be between 0 and 255, or END_OF_STREAM if there are no more bytes. */
+    int next();
+
+    /** Value returned if at the end of the stream. */
+    int END_OF_STREAM = -1;
+
+    /** A source that is immediately exhausted: {@link #next} always returns END_OF_STREAM. */
+    ByteSource EMPTY = () -> END_OF_STREAM;
+
+    /**
+     * Escape value. Used, among other things, to mark the end of subcomponents (so that shorter compares before anything longer).
+     * Actual zeros in input need to be escaped if this is in use (see {@link AbstractEscaper}).
+     */
+    int ESCAPE = 0x00;
+
+    // Zeros are encoded as a sequence of ESCAPE, 0 or more of ESCAPED_0_CONT, ESCAPED_0_DONE so zeroed spaces only grow by 1 byte
+    int ESCAPED_0_CONT = 0xFE;
+    int ESCAPED_0_DONE = 0xFF;
+
+    // All separators must be within these bounds
+    int MIN_SEPARATOR = 0x10;
+    int MAX_SEPARATOR = 0xEF;
+
+    // Next component marker.
+    int NEXT_COMPONENT = 0x40;
+    // Marker used to present null values represented by empty buffers (e.g. by Int32Type)
+    int NEXT_COMPONENT_EMPTY = 0x3F;
+    // Empty-value marker for reversed types; greater than NEXT_COMPONENT so empty sorts after all values under reversal.
+    int NEXT_COMPONENT_EMPTY_REVERSED = 0x41;
+    // Marker for null components in tuples, maps, sets and clustering keys.
+    int NEXT_COMPONENT_NULL = 0x3E;
+
+    // Section for next component markers which is not allowed for use
+    int MIN_NEXT_COMPONENT = 0x3C;
+    int MAX_NEXT_COMPONENT = 0x44;
+
+    // Default terminator byte in sequences. Smaller than NEXT_COMPONENT_NULL, but larger than LT_NEXT_COMPONENT to
+    // ensure lexicographic compares go in the correct direction
+    int TERMINATOR = 0x38;
+    // These are special endings, for exclusive/inclusive bounds (i.e. smaller than anything with more components,
+    // bigger than anything with more components)
+    int LT_NEXT_COMPONENT = 0x20;
+    int GT_NEXT_COMPONENT = 0x60;
+
+    // Special value for components that should be excluded from the normal min/max span. (static rows)
+    int EXCLUDED = 0x18;
+
+    /**
+     * Encodes byte-accessible data as a byte-comparable source that has 0s escaped and finishes in an escaped
+     * state.
+     * This provides a weakly-prefix-free byte-comparable version of the content to use in sequences.
+     * (See {@link AbstractEscaper} for a detailed explanation.)
+     */
+    static <V> ByteSource of(ValueAccessor<V> accessor, V data, Version version)
+    {
+        return new AccessorEscaper<>(accessor, data, version);
+    }
+
+    /**
+     * Encodes a byte buffer as a byte-comparable source that has 0s escaped and finishes in an escape.
+     * This provides a weakly-prefix-free byte-comparable version of the content to use in sequences.
+     * (See ByteSource.BufferEscaper/Multi for explanation.)
+     */
+    static ByteSource of(ByteBuffer buf, Version version)
+    {
+        return new BufferEscaper(buf, version);
+    }
+
+    /**
+     * Encodes a byte array as a byte-comparable source that has 0s escaped and finishes in an escape.
+     * This provides a weakly-prefix-free byte-comparable version of the content to use in sequences.
+     * (See ByteSource.BufferEscaper/Multi for explanation.)
+     */
+    static ByteSource of(byte[] buf, Version version)
+    {
+        return new ArrayEscaper(buf, version);
+    }
+
+    /**
+     * Encodes a memory range as a byte-comparable source that has 0s escaped and finishes in an escape.
+     * This provides a weakly-prefix-free byte-comparable version of the content to use in sequences.
+     * (See ByteSource.BufferEscaper/Multi for explanation.)
+     */
+    static ByteSource ofMemory(long address, int length, ByteComparable.Version version)
+    {
+        return new MemoryEscaper(address, length, version);
+    }
+
+    /**
+     * Combines a chain of sources, turning their weak-prefix-free byte-comparable representation into the combination's
+     * prefix-free byte-comparable representation, with the included terminator character.
+     * For correctness, the terminator must be within MIN-MAX_SEPARATOR and outside the range reserved for
+     * NEXT_COMPONENT markers.
+     * Typically TERMINATOR, or LT/GT_NEXT_COMPONENT if used for partially specified bounds.
+     */
+    static ByteSource withTerminator(int terminator, ByteSource... srcs)
+    {
+        assert terminator >= MIN_SEPARATOR && terminator <= MAX_SEPARATOR;
+        assert terminator < MIN_NEXT_COMPONENT || terminator > MAX_NEXT_COMPONENT;
+        return new Multi(srcs, terminator);
+    }
+
+    /**
+     * As above, but permits any separator. The legacy format wasn't using weak prefix freedom and has some
+     * non-reversible transformations.
+     */
+    static ByteSource withTerminatorLegacy(int terminator, ByteSource... srcs)
+    {
+        return new Multi(srcs, terminator);
+    }
+
+    // Selects the terminator scheme by version: LEGACY keeps the caller-supplied terminator (unchecked),
+    // any other version uses the standard TERMINATOR with the range checks of withTerminator.
+    static ByteSource withTerminatorMaybeLegacy(Version version, int legacyTerminator, ByteSource... srcs)
+    {
+        return version == Version.LEGACY ? withTerminatorLegacy(legacyTerminator, srcs)
+                                         : withTerminator(TERMINATOR, srcs);
+    }
+
+    // Encodes the string as UTF-8 bytes and escapes them like the byte[] overload of "of".
+    static ByteSource of(String s, Version version)
+    {
+        return new ArrayEscaper(s.getBytes(StandardCharsets.UTF_8), version);
+    }
+
+    // Fixed-length (8-byte) big-endian encoding; flipping the sign bit makes unsigned byte order match signed order.
+    static ByteSource of(long value)
+    {
+        return new Number(value ^ (1L<<63), 8);
+    }
+
+    // Fixed-length (4-byte) big-endian encoding; flipping the sign bit makes unsigned byte order match signed order.
+    static ByteSource of(int value)
+    {
+        return new Number(value ^ (1L<<31), 4);
+    }
+
+    /**
+     * Produce a source for a signed fixed-length number, also translating empty to null.
+     * The first byte has its sign bit inverted, and the rest are passed unchanged.
+     * Presumes that the length of the buffer is always either 0 or constant for the type, which permits decoding and
+     * ensures the representation is prefix-free.
+     */
+    static <V> ByteSource optionalSignedFixedLengthNumber(ValueAccessor<V> accessor, V data)
+    {
+        return !accessor.isEmpty(data) ? signedFixedLengthNumber(accessor, data) : null;
+    }
+
+    /**
+     * Produce a source for a signed fixed-length number.
+     * The first byte has its sign bit inverted, and the rest are passed unchanged.
+     * Presumes that the length of the buffer is always constant for the type.
+     * (Implemented by {@link SignedFixedLengthNumber}.)
+     */
+    static <V> ByteSource signedFixedLengthNumber(ValueAccessor<V> accessor, V data)
+    {
+        return new SignedFixedLengthNumber<>(accessor, data);
+    }
+
+    /**
+     * Produce a source for a signed fixed-length floating-point number, also translating empty to null.
+     * If sign bit is on, returns negated bytes. If not, add the sign bit value.
+     * (Sign of IEEE floats is the highest bit, the rest can be compared in magnitude by byte comparison.)
+     * Presumes that the length of the buffer is always either 0 or constant for the type, which permits decoding and
+     * ensures the representation is prefix-free.
+     */
+    static <V> ByteSource optionalSignedFixedLengthFloat(ValueAccessor<V> accessor, V data)
+    {
+        return !accessor.isEmpty(data) ? signedFixedLengthFloat(accessor, data) : null;
+    }
+
+    /**
+     * Produce a source for a signed fixed-length floating-point number.
+     * If sign bit is on, returns negated bytes. If not, add the sign bit value.
+     * (Sign of IEEE floats is the highest bit, the rest can be compared in magnitude by byte comparison.)
+     * Presumes that the length of the buffer is always constant for the type.
+     * (Implemented by {@link SignedFixedLengthFloat}.)
+     */
+    static <V> ByteSource signedFixedLengthFloat(ValueAccessor<V> accessor, V data)
+    {
+        return new SignedFixedLengthFloat<>(accessor, data);
+    }
+
+    /**
+     * Produce a source for a signed integer, stored using variable length encoding.
+     * The representation uses between 1 and 9 bytes, is prefix-free and compares
+     * correctly.
+     * (See {@link VariableLengthInteger} for the encoding details.)
+     */
+    static ByteSource variableLengthInteger(long value)
+    {
+        return new VariableLengthInteger(value);
+    }
+
+    /**
+     * Returns a separator for two byte sources, i.e. something that is definitely > prevMax, and <= currMin, assuming
+     * prevMax < currMin.
+     * This returns the shortest prefix of currMin that is greater than prevMax.
+     * (Implemented by the Separator source with useCurr = true.)
+     */
+    public static ByteSource separatorPrefix(ByteSource prevMax, ByteSource currMin)
+    {
+        return new Separator(prevMax, currMin, true);
+    }
+
+    /**
+     * Returns a separator for two byte sources, i.e. something that is definitely > prevMax, and <= currMin, assuming
+     * prevMax < currMin.
+     * This is a source of length 1 longer than the common prefix of the two sources, with last byte one higher than the
+     * prevMax source.
+     * (Implemented by the Separator source with useCurr = false.)
+     */
+    public static ByteSource separatorGt(ByteSource prevMax, ByteSource currMin)
+    {
+        return new Separator(prevMax, currMin, false);
+    }
+
+    // A single-byte source; the argument must be a valid unsigned byte value (0-255).
+    public static ByteSource oneByte(int i)
+    {
+        assert i >= 0 && i <= 0xFF : "Argument must be a valid unsigned byte.";
+        return new ByteSource()
+        {
+            // set once the single byte has been emitted
+            boolean consumed = false;
+
+            @Override
+            public int next()
+            {
+                if (consumed)
+                    return END_OF_STREAM;
+                consumed = true;
+                return i;
+            }
+        };
+    }
+
+    // Truncates src to at most cutoff bytes; bytes are pulled from src lazily, one per next() call.
+    public static ByteSource cut(ByteSource src, int cutoff)
+    {
+        return new ByteSource()
+        {
+            int pos = 0;
+
+            @Override
+            public int next()
+            {
+                return pos++ < cutoff ? src.next() : END_OF_STREAM;
+            }
+        };
+    }
+
+    /**
+     * Wrap a ByteSource in a length-fixing facade.
+     *
+     * If the length of {@code src} is less than {@code cutoff}, then pad it on the right with {@code padding} until
+     * the overall length equals {@code cutoff}.  If the length of {@code src} is greater than {@code cutoff}, then
+     * truncate {@code src} to that size.  Effectively a noop if {@code src} happens to have length {@code cutoff}.
+     *
+     * @param src the input source to wrap
+     * @param cutoff the size of the source returned
+     * @param padding a padding byte (an int subject to a 0xFF mask)
+     */
+    public static ByteSource cutOrRightPad(ByteSource src, int cutoff, int padding)
+    {
+        return new ByteSource()
+        {
+            int pos = 0;
+
+            @Override
+            public int next()
+            {
+                if (pos++ >= cutoff)
+                {
+                    return END_OF_STREAM;
+                }
+                int next = src.next();
+                // once src is exhausted, emit the padding byte for the remaining positions
+                return next == END_OF_STREAM ? padding : next;
+            }
+        };
+    }
+
+
+    /**
+     * Variable-length encoding. Escapes 0s as ESCAPE + zero or more ESCAPED_0_CONT + ESCAPED_0_DONE.
+     * If the source ends in 0, we use ESCAPED_0_CONT to make sure that the encoding remains smaller than that source
+     * with a further 0 at the end.
+     * Finishes in an escaped state (either with ESCAPE or ESCAPED_0_CONT), which in {@link Multi} is followed by
+     * a component separator between 0x10 and 0xFE.
+     *
+     * E.g. "A\0\0B" translates to 4100FEFF4200
+     *      "A\0B\0"               4100FF4200FE (+00 for {@link Version#LEGACY})
+     *      "A\0"                  4100FE       (+00 for {@link Version#LEGACY})
+     *      "AB"                   414200
+     *
+     * If in a single byte source, the bytes could be simply passed unchanged, but this would not allow us to
+     * combine components. This translation preserves order, and since the encoding for 0 is higher than the separator
+     * also makes sure shorter components are treated as smaller.
+     *
+     * The encoding is not prefix-free, since e.g. the encoding of "A" (4100) is a prefix of the encoding of "A\0"
+     * (4100FE), but the byte following the prefix is guaranteed to be FE or FF, which makes the encoding weakly
+     * prefix-free. Additionally, any such prefix sequence will compare smaller than the value to which it is a prefix,
+     * because any permitted separator byte will be smaller than the byte following the prefix.
+     */
+    abstract static class AbstractEscaper implements ByteSource
+    {
+        private final Version version;
+        // next index to read from the underlying data; may go one past limit() to emit the trailing escape
+        private int bufpos;
+        // true while we are inside a run of input zeros (an ESCAPE has been emitted and not yet closed)
+        private boolean escaped;
+
+        AbstractEscaper(int position, Version version)
+        {
+            this.bufpos = position;
+            this.version = version;
+        }
+
+        @Override
+        public final int next()
+        {
+            if (bufpos >= limit())
+            {
+                // data exhausted: emit the final ESCAPE, or close a trailing zero run with ESCAPED_0_CONT
+                if (bufpos > limit())
+                    return END_OF_STREAM;
+
+                ++bufpos;
+                if (escaped)
+                {
+                    escaped = false;
+                    if (version == Version.LEGACY)
+                        --bufpos; // place an ESCAPE at the end of sequence ending in ESCAPE
+                    return ESCAPED_0_CONT;
+                }
+                return ESCAPE;
+            }
+
+            int index = bufpos++;
+            int b = get(index) & 0xFF;
+            if (!escaped)
+            {
+                // pass through non-zero bytes; a zero starts an escape run
+                if (b == ESCAPE)
+                    escaped = true;
+                return b;
+            }
+            else
+            {
+                if (b == ESCAPE)
+                    return ESCAPED_0_CONT;
+                // non-zero byte ends the zero run: close it with ESCAPED_0_DONE and re-read this byte next call
+                --bufpos;
+                escaped = false;
+                return ESCAPED_0_DONE;
+            }
+        }
+
+        protected abstract byte get(int index);
+
+        protected abstract int limit();
+    }
+
+    /** Escaper over data reached through a {@link ValueAccessor}. */
+    static class AccessorEscaper<V> extends AbstractEscaper
+    {
+        private final V data;
+        private final ValueAccessor<V> accessor;
+
+        private AccessorEscaper(ValueAccessor<V> accessor, V data, Version version)
+        {
+            super(0, version);
+            this.accessor = accessor;
+            this.data = data;
+        }
+
+        protected int limit()
+        {
+            return accessor.size(data);
+        }
+
+        protected byte get(int index)
+        {
+            return accessor.getByte(data, index);
+        }
+    }
+
+    /** Escaper over a ByteBuffer, reading between its position and limit without consuming it. */
+    static class BufferEscaper extends AbstractEscaper
+    {
+        private final ByteBuffer buf;
+
+        private BufferEscaper(ByteBuffer buf, Version version)
+        {
+            super(buf.position(), version);
+            this.buf = buf;
+        }
+
+        protected int limit()
+        {
+            return buf.limit();
+        }
+
+        protected byte get(int index)
+        {
+            return buf.get(index);
+        }
+    }
+
+    /** Escaper over a plain byte array. */
+    static class ArrayEscaper extends AbstractEscaper
+    {
+        private final byte[] buf;
+
+        private ArrayEscaper(byte[] buf, Version version)
+        {
+            super(0, version);
+            this.buf = buf;
+        }
+
+        @Override
+        protected byte get(int index)
+        {
+            return buf[index];
+        }
+
+        @Override
+        protected int limit()
+        {
+            return buf.length;
+        }
+    }
+
+    /** Escaper over a native memory range read via {@link MemoryUtil}. */
+    static class MemoryEscaper extends AbstractEscaper
+    {
+        private final long address;
+        private final int length;
+
+        MemoryEscaper(long address, int length, ByteComparable.Version version)
+        {
+            super(0, version);
+            this.address = address;
+            this.length = length;
+        }
+
+        protected byte get(int index)
+        {
+            return MemoryUtil.getByte(address + index);
+        }
+
+        protected int limit()
+        {
+            return length;
+        }
+    }
+
+    /**
+     * Fixed length signed number encoding. Inverts first bit (so that neg < pos), then just posts all bytes from the
+     * buffer. Assumes buffer is of correct length.
+     */
+    static class SignedFixedLengthNumber<V> implements ByteSource
+    {
+        private final ValueAccessor<V> accessor;
+        private final V data;
+        // index of the next byte to emit
+        private int bufpos;
+
+        public SignedFixedLengthNumber(ValueAccessor<V> accessor, V data)
+        {
+            this.accessor = accessor;
+            this.data = data;
+            this.bufpos = 0;
+        }
+
+        @Override
+        public int next()
+        {
+            if (bufpos >= accessor.size(data))
+                return END_OF_STREAM;
+            int v = accessor.getByte(data, bufpos) & 0xFF;
+            if (bufpos == 0)
+                v ^= 0x80; // invert the sign bit of the leading byte so negatives order before positives
+            ++bufpos;
+            return v;
+        }
+    }
+
+    /**
+     * Variable-length encoding for unsigned integers.
+     * The encoding is similar to UTF-8 encoding.
+     * Numbers between 0 and 127 are encoded in one byte, using 0 in the most significant bit.
+     * Larger values have 1s in as many of the most significant bits as the number of additional bytes
+     * in the representation, followed by a 0. This ensures that longer numbers compare larger than shorter
+     * ones. Since we never use a longer representation than necessary, this implies numbers compare correctly.
+     * As the number of bytes is specified in the bits of the first, no value is a prefix of another.
+     */
+    static class VariableLengthUnsignedInteger implements ByteSource
+    {
+        private final long value;
+        // -1 until the first (length-prefix) byte is emitted; afterwards the bit offset of the next byte
+        private int pos = -1;
+
+        public VariableLengthUnsignedInteger(long value)
+        {
+            this.value = value;
+        }
+
+        @Override
+        public int next()
+        {
+            if (pos == -1)
+            {
+                int bitsMinusOne = 63 - (Long.numberOfLeadingZeros(value | 1)); // 0 to 63 (the | 1 is to make sure 0 maps to 0 (1 bit))
+                int bytesMinusOne = bitsMinusOne / 7;
+                int mask = -256 >> bytesMinusOne;   // sequence of bytesMinusOne 1s in the most-significant bits
+                pos = bytesMinusOne * 8;
+                return (int) ((value >>> pos) | mask) & 0xFF;
+            }
+            pos -= 8;
+            if (pos < 0)
+                return END_OF_STREAM;
+            return (int) (value >>> pos) & 0xFF;
+        }
+    }
+
+    /**
+     * Variable-length encoding for signed integers.
+     * The encoding is based on the unsigned encoding above, where the first bit stored is the inverted sign,
+     * followed by as many matching bits as there are additional bytes in the encoding, followed by the two's
+     * complement of the number.
+     * Because of the inverted sign bit, negative numbers compare smaller than positives, and because the length
+     * bits match the sign, longer positive numbers compare greater and longer negative ones compare smaller.
+     *
+     * Examples:
+     *      0              encodes as           80
+     *      1              encodes as           81
+     *     -1              encodes as           7F
+     *     63              encodes as           BF
+     *     64              encodes as           C040
+     *    -64              encodes as           40
+     *    -65              encodes as           3FBF
+     *   2^20-1            encodes as           EFFFFF
+     *   2^20              encodes as           F0100000
+     *  -2^20              encodes as           100000
+     *   2^64-1            encodes as           FFFFFFFFFFFFFFFFFF
+     *  -2^64              encodes as           000000000000000000
+     *
+     * As the number of bytes is specified in bits 2-9, no value is a prefix of another.
+     */
+    static class VariableLengthInteger implements ByteSource
+    {
+        // the value with the length/sign prefix bits pre-merged by the constructor
+        private final long value;
+        // remaining bits to emit; values outside 0-64 signal that the extra 9th-byte prefix is still pending
+        private int pos;
+
+        public VariableLengthInteger(long value)
+        {
+            long negativeMask = value >> 63;    // -1 for negative, 0 for positive
+            value ^= negativeMask;
+
+            int bits = 64 - Long.numberOfLeadingZeros(value | 1); // 1 to 63 (can't be 64 because we flip negative numbers)
+            int bytes = bits / 7 + 1;   // 0-6 bits 1 byte 7-13 2 bytes etc to 56-63 9 bytes
+            if (bytes >= 9)
+            {
+                value |= 0x8000000000000000L;   // 8th bit, which doesn't fit the first byte
+                pos = negativeMask < 0 ? 256 : -1; // out of 0-64 range integer such that & 0xFF is 0x00 for negative and 0xFF for positive
+            }
+            else
+            {
+                long mask = (-0x100 >> bytes) & 0xFF; // one in sign bit and as many more as there are extra bytes
+                pos = bytes * 8;
+                value = value | (mask << (pos - 8));
+            }
+
+            value ^= negativeMask;
+            this.value = value;
+        }
+
+        @Override
+        public int next()
+        {
+            if (pos <= 0 || pos > 64)
+            {
+                if (pos == 0)
+                    return END_OF_STREAM;
+                else
+                {
+                    // 8-byte value, returning first byte
+                    int result = pos & 0xFF; // 0x00 for negative numbers, 0xFF for positive
+                    pos = 64;
+                    return result;
+                }
+            }
+            pos -= 8;
+            return (int) (value >>> pos) & 0xFF;
+        }
+    }
+
+    /**
+     * Fixed-length big-endian encoding: emits the low {@code length} bytes of {@code value}, most significant first.
+     * Callers (e.g. {@code of(long)}/{@code of(int)}) pre-flip the sign bit before construction where needed.
+     */
+    static class Number implements ByteSource
+    {
+        private final long value;
+        // number of bytes still to emit
+        private int pos;
+
+        public Number(long value, int length)
+        {
+            this.value = value;
+            this.pos = length;
+        }
+
+        @Override
+        public int next()
+        {
+            if (pos == 0)
+                return END_OF_STREAM;
+            return (int) ((value >> (--pos * 8)) & 0xFF);
+        }
+    }
+
+    /**
+     * Fixed length signed floating point number encoding. First bit is sign. If positive, add sign bit value to make
+     * greater than all negatives. If not, invert all content to make negatives with bigger magnitude smaller.
+     */
+    static class SignedFixedLengthFloat<V> implements ByteSource
+    {
+        private final ValueAccessor<V> accessor;
+        private final V data;
+        private int bufpos;
+        private boolean invert;
+
+        public SignedFixedLengthFloat(ValueAccessor<V> accessor, V data)
+        {
+            this.accessor = accessor;
+            this.data = data;
+            this.bufpos = 0;
+        }
+
+        @Override
+        public int next()
+        {
+            if (bufpos >= accessor.size(data))
+                return END_OF_STREAM;
+            int v = accessor.getByte(data, bufpos) & 0xFF;
+            if (bufpos == 0)
+            {
+                invert = v >= 0x80;
+                v |= 0x80;
+            }
+            if (invert)
+                v = v ^ 0xFF;
+            ++bufpos;
+            return v;
+        }
+    }
+
+    /**
+     * Combination of multiple byte sources. Adds {@link NEXT_COMPONENT} before sources, or {@link NEXT_COMPONENT_NULL} if next is null.
+     */
+    static class Multi implements ByteSource
+    {
+        private final ByteSource[] srcs;
+        private int srcnum = -1;
+        private final int sequenceTerminator;
+
+        Multi(ByteSource[] srcs, int sequenceTerminator)
+        {
+            this.srcs = srcs;
+            this.sequenceTerminator = sequenceTerminator;
+        }
+
+        @Override
+        public int next()
+        {
+            if (srcnum == srcs.length)
+                return END_OF_STREAM;
+
+            int b = END_OF_STREAM;
+            if (srcnum >= 0 && srcs[srcnum] != null)
+                b = srcs[srcnum].next();
+            if (b > END_OF_STREAM)
+                return b;
+
+            ++srcnum;
+            if (srcnum == srcs.length)
+                return sequenceTerminator;
+            if (srcs[srcnum] == null)
+                return NEXT_COMPONENT_NULL;
+            return NEXT_COMPONENT;
+        }
+    }
+
+    /**
+     * Construct the shortest common prefix of prevMax and currMin that separates those two byte streams.
+     * If {@code useCurr == true} the last byte of the returned stream comes from {@code currMin} and is the first
+     * byte which is greater than byte on the corresponding position of {@code prevMax}.
+     * Otherwise, the last byte of the returned stream comes from {@code prevMax} and is incremented by one, still
+     * guaranteeing that it is <= than the byte on the corresponding position of {@code currMin}.
+     */
+    static class Separator implements ByteSource
+    {
+        private final ByteSource prev;
+        private final ByteSource curr;
+        private boolean done = false;
+        private final boolean useCurr;
+
+        Separator(ByteSource prevMax, ByteSource currMin, boolean useCurr)
+        {
+            this.prev = prevMax;
+            this.curr = currMin;
+            this.useCurr = useCurr;
+        }
+
+        @Override
+        public int next()
+        {
+            if (done)
+                return END_OF_STREAM;
+            int p = prev.next();
+            int c = curr.next();
+            assert p <= c : prev + " not less than " + curr;
+            if (p == c)
+                return c;
+            done = true;
+            return useCurr ? c : p + 1;
+        }
+    }
+
+    static <V> ByteSource optionalFixedLength(ValueAccessor<V> accessor, V data)
+    {
+        return !accessor.isEmpty(data) ? fixedLength(accessor, data) : null;
+    }
+
+    /**
+     * A byte source of the given bytes without any encoding.
+     * The resulting source is only guaranteed to give correct comparison results and be prefix-free if the
+     * underlying type has a fixed length.
+     * In tests, this method is also used to generate non-escaped test cases.
+     */
+    public static <V> ByteSource fixedLength(ValueAccessor<V> accessor, V data)
+    {
+        return new ByteSource()
+        {
+            int pos = -1;
+
+            @Override
+            public int next()
+            {
+                return ++pos < accessor.size(data) ? accessor.getByte(data, pos) & 0xFF : END_OF_STREAM;
+            }
+        };
+    }
+
+    /**
+     * A byte source of the given bytes without any encoding.
+     * The resulting source is only guaranteed to give correct comparison results and be prefix-free if the
+     * underlying type has a fixed length.
+     * In tests, this method is also used to generate non-escaped test cases.
+     */
+    public static ByteSource fixedLength(ByteBuffer b)
+    {
+        return new ByteSource()
+        {
+            int pos = b.position() - 1;
+
+            @Override
+            public int next()
+            {
+                return ++pos < b.limit() ? b.get(pos) & 0xFF : END_OF_STREAM;
+            }
+        };
+    }
+
+    /**
+     * A byte source of the given bytes without any encoding.
+     * If used in a sequence, the resulting source is only guaranteed to give correct comparison results if the
+     * underlying type has a fixed length.
+     * In tests, this method is also used to generate non-escaped test cases.
+     */
+    public static ByteSource fixedLength(byte[] b)
+    {
+        return fixedLength(b, 0, b.length);
+    }
+
+    public static ByteSource fixedLength(byte[] b, int offset, int length)
+    {
+        checkArgument(offset >= 0 && offset <= b.length);
+        checkArgument(length >= 0 && offset + length <= b.length);
+
+        return new ByteSource()
+        {
+            int pos = offset - 1;
+
+            @Override
+            public int next()
+            {
+                return ++pos < offset + length ? b[pos] & 0xFF : END_OF_STREAM;
+            }
+        };
+    }
+
+    public class Peekable implements ByteSource
+    {
+        private static final int NONE = Integer.MIN_VALUE;
+
+        private final ByteSource wrapped;
+        private int peeked = NONE;
+
+        public Peekable(ByteSource wrapped)
+        {
+            this.wrapped = wrapped;
+        }
+
+        @Override
+        public int next()
+        {
+            if (peeked != NONE)
+            {
+                int val = peeked;
+                peeked = NONE;
+                return val;
+            }
+            else
+                return wrapped.next();
+        }
+
+        public int peek()
+        {
+            if (peeked == NONE)
+                peeked = wrapped.next();
+            return peeked;
+        }
+    }
+
+    public static Peekable peekable(ByteSource p)
+    {
+        // When given a null source, we're better off not wrapping it and just returning null. This way existing
+        // code that doesn't know about ByteSource.Peekable, but handles correctly null ByteSources won't be thrown
+        // off by a non-null instance that semantically should have been null.
+        if (p == null)
+            return null;
+        return (p instanceof Peekable)
+               ? (Peekable) p
+               : new Peekable(p);
+    }
+}
diff --git a/src/java/org/apache/cassandra/utils/bytecomparable/ByteSourceInverse.java b/src/java/org/apache/cassandra/utils/bytecomparable/ByteSourceInverse.java
new file mode 100644
index 0000000..16b6679
--- /dev/null
+++ b/src/java/org/apache/cassandra/utils/bytecomparable/ByteSourceInverse.java
@@ -0,0 +1,471 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.cassandra.db.marshal.ValueAccessor;
+
+/**
+ * Contains inverse transformation utilities for {@link ByteSource}s.
+ *
+ * See ByteComparable.md for details about the encoding scheme.
+ */
+public final class ByteSourceInverse
+{
+    private static final int INITIAL_BUFFER_CAPACITY = 32;
+    private static final int BYTE_ALL_BITS = 0xFF;
+    private static final int BYTE_NO_BITS = 0x00;
+    private static final int BYTE_SIGN_BIT = 1 << 7;
+    private static final int SHORT_SIGN_BIT = 1 << 15;
+    private static final int INT_SIGN_BIT = 1 << 31;
+    private static final long LONG_SIGN_BIT = 1L << 63;
+
+    /**
+     * Consume the given number of bytes and produce a long from them, effectively treating the bytes as a big-endian
+     * unsigned encoding of the number.
+     */
+    public static long getUnsignedFixedLengthAsLong(ByteSource byteSource, int length)
+    {
+        Preconditions.checkNotNull(byteSource);
+        Preconditions.checkArgument(length >= 1 && length <= 8, "Between 1 and 8 bytes can be read at a time");
+
+        long result = 0;
+        for (int i = 0; i < length; ++i)
+            result = (result << 8) | getAndCheckByte(byteSource, i, length);  // note: this must use the unsigned byte value
+
+        return result;
+    }
+
+    /**
+     * Produce the bytes for an encoded signed fixed-length number.
+     * The first byte has its sign bit inverted, and the rest are passed unchanged.
+     */
+    public static <V> V getSignedFixedLength(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        Preconditions.checkNotNull(byteSource);
+        Preconditions.checkArgument(length >= 1, "At least 1 byte should be read");
+
+        V result = accessor.allocate(length);
+        // The first byte needs to have its sign flipped
+        accessor.putByte(result, 0, (byte) (getAndCheckByte(byteSource, 0, length) ^ BYTE_SIGN_BIT));
+        // and the rest can be retrieved unchanged.
+        for (int i = 1; i < length; ++i)
+            accessor.putByte(result, i, (byte) getAndCheckByte(byteSource, i, length));
+        return result;
+    }
+
+    /**
+     * Produce the bytes for an encoded signed fixed-length number, also translating null to empty buffer.
+     * The first byte has its sign bit inverted, and the rest are passed unchanged.
+     */
+    public static <V> V getOptionalSignedFixedLength(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        return byteSource == null ? accessor.empty() : getSignedFixedLength(accessor, byteSource, length);
+    }
+
+    /**
+     * Produce the bytes for an encoded signed fixed-length floating-point number.
+     * If sign bit is on, returns negated bytes. If not, clears the sign bit and passes the rest of the bytes unchanged.
+     */
+    public static <V> V getSignedFixedLengthFloat(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        Preconditions.checkNotNull(byteSource);
+        Preconditions.checkArgument(length >= 1, "At least 1 byte should be read");
+
+        V result = accessor.allocate(length);
+
+        int xor;
+        int first = getAndCheckByte(byteSource, 0, length);
+        if (first < 0x80)
+        {
+            // Negative number. Invert all bits.
+            xor = BYTE_ALL_BITS;
+            first ^= xor;
+        }
+        else
+        {
+            // Positive number. Invert only the sign bit.
+            xor = BYTE_NO_BITS;
+            first ^= BYTE_SIGN_BIT;
+        }
+        accessor.putByte(result, 0, (byte) first);
+
+        // xor is now applied to the rest of the bytes to flip their bits if necessary.
+        for (int i = 1; i < length; ++i)
+            accessor.putByte(result, i, (byte) (getAndCheckByte(byteSource, i, length) ^ xor));
+
+        return result;
+    }
+
+    /**
+     * Produce the bytes for an encoded signed fixed-length floating-point number, also translating null to an empty
+     * buffer.
+     * If sign bit is on, returns negated bytes. If not, clears the sign bit and passes the rest of the bytes unchanged.
+     */
+    public static <V> V getOptionalSignedFixedLengthFloat(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        return byteSource == null ? accessor.empty() : getSignedFixedLengthFloat(accessor, byteSource, length);
+    }
+
+    /**
+     * Consume the next length bytes from the source unchanged.
+     */
+    public static <V> V getFixedLength(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        Preconditions.checkNotNull(byteSource);
+        Preconditions.checkArgument(length >= 1, "At least 1 byte should be read");
+
+        V result = accessor.allocate(length);
+        for (int i = 0; i < length; ++i)
+            accessor.putByte(result, i, (byte) getAndCheckByte(byteSource, i, length));
+        return result;
+    }
+
+    /**
+     * Consume the next length bytes from the source unchanged, also translating null to an empty buffer.
+     */
+    public static <V> V getOptionalFixedLength(ValueAccessor<V> accessor, ByteSource byteSource, int length)
+    {
+        return byteSource == null ? accessor.empty() : getFixedLength(accessor, byteSource, length);
+    }
+
+    /**
+     * Consume the next {@code int} from the current position of the given {@link ByteSource}. The source position is
+     * modified accordingly (moved 4 bytes forward).
+     * <p>
+     * The source is not strictly required to represent just the encoding of an {@code int} value, so theoretically
+     * this API could be used for reading data in 4-byte strides. Nevertheless its usage is fairly limited because:
+     * <ol>
+     *     <li>...it presupposes signed fixed-length encoding for the encoding of the original value</li>
+     *     <li>...it decodes the data returned on each stride as an {@code int} (i.e. it inverts its leading bit)</li>
+     *     <li>...it doesn't provide any meaningful guarantees (with regard to throwing) in case there are not enough
+     *     bytes to read, in case a special escape value was not interpreted as such, etc.</li>
+     * </ol>
+     * </p>
+     *
+     * @param byteSource A non-null byte source, containing at least 4 bytes.
+     */
+    public static int getSignedInt(ByteSource byteSource)
+    {
+        return (int) getUnsignedFixedLengthAsLong(byteSource, 4) ^ INT_SIGN_BIT;
+    }
+
+    /**
+     * Consume the next {@code long} from the current position of the given {@link ByteSource}. The source position is
+     * modified accordingly (moved 8 bytes forward).
+     * <p>
+     * The source is not strictly required to represent just the encoding of a {@code long} value, so theoretically
+     * this API could be used for reading data in 8-byte strides. Nevertheless its usage is fairly limited because:
+     * <ol>
+     *     <li>...it presupposes signed fixed-length encoding for the encoding of the original value</li>
+     *     <li>...it decodes the data returned on each stride as a {@code long} (i.e. it inverts its leading bit)</li>
+     *     <li>...it doesn't provide any meaningful guarantees (with regard to throwing) in case there are not enough
+     *     bytes to read, in case a special escape value was not interpreted as such, etc.</li>
+     * </ol>
+     * </p>
+     *
+     * @param byteSource A non-null byte source, containing at least 8 bytes.
+     */
+    public static long getSignedLong(ByteSource byteSource)
+    {
+        return getUnsignedFixedLengthAsLong(byteSource, 8) ^ LONG_SIGN_BIT;
+    }
+
+    /**
+     * Converts the given {@link ByteSource} to a {@code byte}.
+     *
+     * @param byteSource A non-null byte source, containing at least 1 byte.
+     */
+    public static byte getSignedByte(ByteSource byteSource)
+    {
+        return (byte) (getAndCheckByte(Preconditions.checkNotNull(byteSource), 0, 1) ^ BYTE_SIGN_BIT);
+    }
+
+    /**
+     * Converts the given {@link ByteSource} to a {@code short}. All terms and conditions valid for
+     * {@link #getSignedInt(ByteSource)} and {@link #getSignedLong(ByteSource)} translate to this as well.
+     *
+     * @param byteSource A non-null byte source, containing at least 2 bytes.
+     *
+     * @see #getSignedInt(ByteSource)
+     * @see #getSignedLong(ByteSource)
+     */
+    public static short getSignedShort(ByteSource byteSource)
+    {
+        return (short) (getUnsignedFixedLengthAsLong(byteSource, 2) ^ SHORT_SIGN_BIT);
+    }
+
+    /**
+     * Decode a variable-length signed integer.
+     */
+    public static long getVariableLengthInteger(ByteSource byteSource)
+    {
+        int signAndMask = getAndCheckByte(byteSource);
+
+        long sum = 0;
+        int bytes;
+        // For every bit after the sign that matches the sign, read one more byte.
+        for (bytes = 0; bytes < 7 && sameByteSign(signAndMask << (bytes + 1), signAndMask); ++bytes)
+            sum = (sum << 8) | getAndCheckByte(byteSource);
+
+        // The eighth length bit is stored in the second byte.
+        if (bytes == 7 && sameByteSign((int) (sum >> 48), signAndMask))
+            return ((sum << 8) | getAndCheckByte(byteSource)) ^ LONG_SIGN_BIT;    // 9-byte encoding, use bytes 2-9 with inverted sign
+        else
+        {
+            sum |= (((long) signAndMask) << bytes * 8);     // add the rest of the bits
+            long signMask = -0x40L << bytes * 7;            // mask of the bits that should be replaced by the sign
+            long sign = (byte) (signAndMask ^ 0x80) >> 7;   // -1 if negative (0 leading bit), 0 otherwise
+            return sum & ~signMask | sign & signMask;
+        }
+    }
+
+    /**
+     * Decode a variable-length unsigned integer, passing all bytes read through XOR with the given xorWith parameter.
+     *
+     * Used in BigInteger encoding to read number length, where negative numbers have their length negated
+     * (i.e. xorWith = 0xFF) to ensure correct ordering.
+     */
+    public static long getVariableLengthUnsignedIntegerXoring(ByteSource byteSource, int xorWith)
+    {
+        int signAndMask = getAndCheckByte(byteSource) ^ xorWith;
+
+        long sum = 0;
+        int bytes;
+        // Read an extra byte while the next most significant bit is 1.
+        for (bytes = 0; bytes <= 7 && ((signAndMask << bytes) & 0x80) != 0; ++bytes)
+            sum = (sum << 8) | getAndCheckByte(byteSource) ^ xorWith;
+
+        // Strip the length bits from the leading byte.
+        signAndMask &= ~(-256 >> bytes);
+        return sum | (((long) signAndMask) << bytes * 8);     // Add the rest of the bits of the leading byte.
+    }
+
+    /** Returns true if the two parameters treated as bytes have the same sign. */
+    private static boolean sameByteSign(int a, int b)
+    {
+        return ((a ^ b) & 0x80) == 0;
+    }
+
+
+    private static int getAndCheckByte(ByteSource byteSource)
+    {
+        return getAndCheckByte(byteSource, -1, -1);
+    }
+
+    private static int getAndCheckByte(ByteSource byteSource, int pos, int length)
+    {
+        int data = byteSource.next();
+        if (data == ByteSource.END_OF_STREAM)
+            throw new IllegalArgumentException(
+                length > 0 ? String.format("Unexpected end of stream reached after %d bytes (expected >= %d)", pos, length)
+                           : "Unexpected end of stream");
+        assert data >= BYTE_NO_BITS && data <= BYTE_ALL_BITS
+            : "A ByteSource must produce unsigned bytes and end in END_OF_STREAM";
+        return data;
+    }
+
+    /**
+     * Reads a single variable-length byte sequence (blob, string, ...) encoded according to the scheme described
+     * in ByteComparable.md, decoding it back to its original, unescaped form.
+     *
+     * @param byteSource The source of the variable-length bytes sequence.
+     * @return A byte array containing the original, unescaped bytes of the given source. Unescaped here means
+     * not including any of the escape sequences of the encoding scheme used for variable-length byte sequences.
+     */
+    public static byte[] getUnescapedBytes(ByteSource.Peekable byteSource)
+    {
+        return byteSource == null ? null : readBytes(unescape(byteSource));
+    }
+
+    /**
+     * As above, but converts the result to a ByteSource.
+     */
+    public static ByteSource unescape(ByteSource.Peekable byteSource)
+    {
+        return new ByteSource() {
+            boolean escaped = false;
+
+            @Override
+            public int next()
+            {
+                if (!escaped)
+                {
+                    int data = byteSource.next(); // we consume this byte no matter what it is
+                    if (data > ByteSource.ESCAPE)
+                        return data;        // most used path leads here
+
+                    assert data != ByteSource.END_OF_STREAM : "Invalid escaped byte sequence";
+                    escaped = true;
+                }
+
+                int next = byteSource.peek();
+                switch (next)
+                {
+                    case END_OF_STREAM:
+                        // The end of a byte-comparable outside of a multi-component sequence. No matter what we have
+                        // seen or peeked before, we should stop now.
+                        byteSource.next();
+                        return END_OF_STREAM;
+                    case ESCAPED_0_DONE:
+                        // The end of 1 or more consecutive 0x00 value bytes.
+                        escaped = false;
+                        byteSource.next();
+                        return ESCAPE;
+                    case ESCAPED_0_CONT:
+                        // Escaped sequence continues
+                        byteSource.next();
+                        return ESCAPE;
+                    default:
+                        // An ESCAPE or ESCAPED_0_CONT won't be followed by either another ESCAPED_0_CONT, an
+                        // ESCAPED_0_DONE, or an END_OF_STREAM only when the byte-comparable is part of a multi-component
+                        // sequence and we have reached the end of the encoded byte-comparable. In this case, the byte
+                        // we have just peeked is the separator or terminator byte between or at the end of components
+                        // (which by contract must be 0x10 - 0xFE, which cannot conflict with our special bytes).
+                        assert next >= ByteSource.MIN_SEPARATOR && next <= ByteSource.MAX_SEPARATOR : next;
+                        // Unlike above, we don't consume this byte (the sequence decoding needs it).
+                        return END_OF_STREAM;
+                }
+            }
+        };
+    }
+
+    /**
+     * Reads the bytes of the given source into a byte array. Doesn't do any transformation on the bytes, just reads
+     * them until it reads an {@link ByteSource#END_OF_STREAM} byte, after which it returns an array of all the read
+     * bytes, <strong>excluding the {@link ByteSource#END_OF_STREAM}</strong>.
+     * <p>
+     * This method sizes a tentative internal buffer array at {@code initialBufferCapacity}.  However, if
+     * {@code byteSource} exceeds this size, the buffer array is recreated with doubled capacity as many times as
+     * necessary.  If, after {@code byteSource} is fully exhausted, the number of bytes read from it does not exactly
+     * match the current size of the tentative buffer array, then it is copied into another array sized to fit the
+     * number of bytes read; otherwise, it is returned without that final copy step.
+     *
+     * @param byteSource The source which bytes we're interested in.
+     * @param initialBufferCapacity The initial size of the internal buffer.
+     * @return A byte array containing exactly all the read bytes. In case of a {@code null} source, the returned byte
+     * array will be empty.
+     */
+    public static byte[] readBytes(ByteSource byteSource, final int initialBufferCapacity)
+    {
+        Preconditions.checkNotNull(byteSource);
+
+        int readBytes = 0;
+        byte[] buf = new byte[initialBufferCapacity];
+        int data;
+        while ((data = byteSource.next()) != ByteSource.END_OF_STREAM)
+        {
+            buf = ensureCapacity(buf, readBytes);
+            buf[readBytes++] = (byte) data;
+        }
+
+        if (readBytes != buf.length)
+        {
+            buf = Arrays.copyOf(buf, readBytes);
+        }
+        return buf;
+    }
+
+    /**
+     * Reads the bytes of the given source into a byte array. Doesn't do any transformation on the bytes, just reads
+     * them until it reads an {@link ByteSource#END_OF_STREAM} byte, after which it returns an array of all the read
+     * bytes, <strong>excluding the {@link ByteSource#END_OF_STREAM}</strong>.
+     * <p>
+     * This is equivalent to {@link #readBytes(ByteSource, int)} where the second actual parameter is
+     * {@linkplain #INITIAL_BUFFER_CAPACITY} ({@value INITIAL_BUFFER_CAPACITY}).
+     *
+     * @param byteSource The source which bytes we're interested in.
+     * @return A byte array containing exactly all the read bytes. In case of a {@code null} source, the returned byte
+     * array will be empty.
+     */
+    public static byte[] readBytes(ByteSource byteSource)
+    {
+        return readBytes(byteSource, INITIAL_BUFFER_CAPACITY);
+    }
+
+    /**
+     * Ensures the given buffer has capacity for taking data with the given length - if it doesn't, it returns a copy
+     * of the buffer, but with double the capacity.
+     */
+    private static byte[] ensureCapacity(byte[] buf, int dataLengthInBytes)
+    {
+        if (dataLengthInBytes == buf.length)
+            // We won't gain much with guarding against overflow. We'll overflow when dataLengthInBytes >= 1 << 30,
+            // and if we do guard, we'll be able to extend the capacity to Integer.MAX_VALUE (which is 1 << 31 - 1).
+            // Controlling the exception that will be thrown shouldn't matter that much, and in practice, we almost
+            // surely won't be reading gigabytes of ByteSource data at once.
+            return Arrays.copyOf(buf, dataLengthInBytes * 2);
+        else
+            return buf;
+    }
+
+    /**
+     * Converts the given {@link ByteSource} to a UTF-8 {@link String}.
+     *
+     * @param byteSource The source we're interested in.
+     * @return A UTF-8 string corresponding to the given source.
+     */
+    public static String getString(ByteSource.Peekable byteSource)
+    {
+        if (byteSource == null)
+            return null;
+
+        byte[] data = getUnescapedBytes(byteSource);
+
+        return new String(data, StandardCharsets.UTF_8);
+    }
+
+    /*
+     * Multi-component sequence utilities.
+     */
+
+    /**
+     * A utility for consuming components from a peekable multi-component sequence.
+     * It uses the component separators, so the given sequence needs to have its last component fully consumed, in
+     * order for the next consumable byte to be a separator. Identifying the end of the component that will then be
+     * consumed is the responsibility of the consumer (the user of this method).
+     * @param source A peekable multi-component sequence, which next byte is a component separator.
+     * @return the given multi-component sequence if its next component is not null, or {@code null} if it is.
+     */
+    public static ByteSource.Peekable nextComponentSource(ByteSource.Peekable source)
+    {
+        return nextComponentSource(source, source.next());
+    }
+
+    /**
+     * A utility for consuming components from a peekable multi-component sequence, very similar to
+     * {@link #nextComponentSource(ByteSource.Peekable)} - the difference being that here the separator can be passed
+     * in case it had to be consumed beforehand.
+     */
+    public static ByteSource.Peekable nextComponentSource(ByteSource.Peekable source, int separator)
+    {
+        return nextComponentNull(separator)
+               ? null
+               : source;
+    }
+
+    public static boolean nextComponentNull(int separator)
+    {
+        return separator == ByteSource.NEXT_COMPONENT_NULL || separator == ByteSource.NEXT_COMPONENT_EMPTY
+               || separator == ByteSource.NEXT_COMPONENT_EMPTY_REVERSED;
+    }
+}
diff --git a/test/conf/cassandra_ssl_test_outbound.keystore b/test/conf/cassandra_ssl_test_outbound.keystore
new file mode 100644
index 0000000..7dbf466
--- /dev/null
+++ b/test/conf/cassandra_ssl_test_outbound.keystore
Binary files differ
diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
index 38fd1f8..705a7f3 100644
--- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
+++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java
@@ -114,6 +114,7 @@
 import org.apache.cassandra.schema.MigrationCoordinator;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.schema.SchemaConstants;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CassandraDaemon;
 import org.apache.cassandra.service.ClientState;
@@ -619,6 +620,9 @@
                 // Start up virtual table support
                 CassandraDaemon.getInstanceForTesting().setupVirtualKeyspaces();
 
+                // clean up debris in data directories
+                CassandraDaemon.getInstanceForTesting().scrubDataDirectories();
+
                 Keyspace.setInitialized();
 
                 // Replay any CommitLogSegments found on disk
@@ -663,6 +667,7 @@
                     propagateMessagingVersions(cluster); // fake messaging needs to know messaging version for filters
                 }
                 internodeMessagingStarted = true;
+
                 JVMStabilityInspector.replaceKiller(new InstanceKiller(Instance.this::shutdown));
 
                 // TODO: this is more than just gossip
diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
index dc280f3..d848d20 100644
--- a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
+++ b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java
@@ -30,6 +30,7 @@
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.function.BiConsumer;
@@ -40,6 +41,10 @@
 import java.util.stream.Collectors;
 
 import com.google.common.util.concurrent.Futures;
+
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.gms.ApplicationState;
+import org.apache.cassandra.gms.VersionedValue;
 import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 
@@ -554,6 +559,47 @@
         });
     }
 
+    public static void awaitGossipSchemaMatch(ICluster<? extends  IInstance> cluster)
+    {
+        cluster.forEach(ClusterUtils::awaitGossipSchemaMatch);
+    }
+
+    public static void awaitGossipSchemaMatch(IInstance instance)
+    {
+        if (!instance.config().has(Feature.GOSSIP))
+        {
+            // when gossip isn't enabled, don't bother waiting on gossip to settle...
+            return;
+        }
+        awaitGossip(instance, "Schema IDs did not match", all -> {
+            String current = null;
+            for (Map.Entry<String, Map<String, String>> e : all.entrySet())
+            {
+                Map<String, String> state = e.getValue();
+                // has the instance joined?
+                String status = state.get(ApplicationState.STATUS_WITH_PORT.name());
+                if (status == null)
+                    status = state.get(ApplicationState.STATUS.name());
+                if (status == null || !status.contains(VersionedValue.STATUS_NORMAL))
+                    continue; // ignore instances not joined yet
+                String schema = state.get("SCHEMA");
+                if (schema == null)
+                    throw new AssertionError("Unable to find schema for " + e.getKey() + "; status was " + status);
+                schema = schema.split(":")[1];
+
+                if (current == null)
+                {
+                    current = schema;
+                }
+                else if (!current.equals(schema))
+                {
+                    return false;
+                }
+            }
+            return true;
+        });
+    }
+
     /**
      * Get the gossip information from the node.  Currently only address, generation, and heartbeat are returned
      *
diff --git a/test/distributed/org/apache/cassandra/distributed/test/EphemeralSnapshotTest.java b/test/distributed/org/apache/cassandra/distributed/test/EphemeralSnapshotTest.java
new file mode 100644
index 0000000..a9e804d
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/EphemeralSnapshotTest.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import com.google.common.util.concurrent.Futures;
+import org.junit.Test;
+
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.IIsolatedExecutor;
+import org.apache.cassandra.io.util.File;
+import org.apache.cassandra.service.snapshot.SnapshotManifest;
+import org.apache.cassandra.utils.Pair;
+
+import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.cassandra.distributed.api.ConsistencyLevel.ONE;
+import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
+import static org.apache.cassandra.distributed.api.Feature.NATIVE_PROTOCOL;
+import static org.apache.cassandra.distributed.api.Feature.NETWORK;
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class EphemeralSnapshotTest extends TestBaseImpl
+{
+    private static final String snapshotName = "snapshotname";
+    private static final String tableName = "city";
+
+    @Test
+    public void testStartupRemovesEphemeralSnapshotOnEphemeralFlagInManifest() throws Exception
+    {
+        try (Cluster c = init(builder().withNodes(1)
+                                       .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
+                                       .start()))
+        {
+            Pair<String, String[]> initialisationData = initialise(c);
+
+            String tableId = initialisationData.left;
+            String[] dataDirs = initialisationData.right;
+
+            // rewrite manifest, pretend that it is ephemeral
+            Path manifestPath = findManifest(dataDirs, tableId);
+            SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(new File(manifestPath));
+            SnapshotManifest manifestWithEphemeralFlag = new SnapshotManifest(manifest.files, null, manifest.createdAt, true);
+            manifestWithEphemeralFlag.serializeToJsonFile(new File(manifestPath));
+
+            verify(c.get(1));
+        }
+    }
+
+    // TODO this test might be deleted once we get rid of ephemeral marker file for good in 4.3
+    @Test
+    public void testStartupRemovesEphemeralSnapshotOnMarkerFile() throws Exception
+    {
+        try (Cluster c = init(builder().withNodes(1)
+                                       .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL))
+                                       .start()))
+        {
+            Pair<String, String[]> initialisationData = initialise(c);
+
+            String tableId = initialisationData.left;
+            String[] dataDirs = initialisationData.right;
+
+            // place ephemeral marker file into snapshot directory pretending it was created as ephemeral
+            Path ephemeralMarkerFile = Paths.get(dataDirs[0])
+                                            .resolve(KEYSPACE)
+                                            .resolve(format("%s-%s", tableName, tableId))
+                                            .resolve("snapshots")
+                                            .resolve(snapshotName)
+                                            .resolve("ephemeral.snapshot");
+
+            Files.createFile(ephemeralMarkerFile);
+
+            verify(c.get(1));
+        }
+    }
+
+    private Pair<String, String[]> initialise(Cluster c)
+    {
+        c.schemaChange(withKeyspace("CREATE TABLE IF NOT EXISTS %s." + tableName + " (cityid int PRIMARY KEY, name text)"));
+        c.coordinator(1).execute(withKeyspace("INSERT INTO %s." + tableName + "(cityid, name) VALUES (1, 'Canberra');"), ONE);
+        IInvokableInstance instance = c.get(1);
+
+        instance.flush(KEYSPACE);
+
+        assertEquals(0, instance.nodetool("snapshot", "-kt", withKeyspace("%s." + tableName), "-t", snapshotName));
+        waitForSnapshot(instance, snapshotName);
+
+        String tableId = instance.callOnInstance((IIsolatedExecutor.SerializableCallable<String>) () -> {
+            return Keyspace.open(KEYSPACE).getMetadata().tables.get(tableName).get().id.asUUID().toString().replaceAll("-", "");
+        });
+
+        String[] dataDirs = (String[]) instance.config().get("data_file_directories");
+
+        return Pair.create(tableId, dataDirs);
+    }
+
+
+    private void verify(IInvokableInstance instance)
+    {
+        // by default, we do not see ephemerals
+        assertFalse(instance.nodetoolResult("listsnapshots").getStdout().contains("snapshotname"));
+
+        // we see them via -e flag
+        assertTrue(instance.nodetoolResult("listsnapshots", "-e").getStdout().contains("snapshotname"));
+
+        Futures.getUnchecked(instance.shutdown());
+
+        // startup should remove ephemeral marker file
+        instance.startup();
+
+        assertFalse(instance.nodetoolResult("listsnapshots", "-e").getStdout().contains("snapshotname"));
+    }
+
+    private void waitForSnapshot(IInvokableInstance instance, String snapshotName)
+    {
+        await().timeout(20, SECONDS)
+               .pollInterval(1, SECONDS)
+               .until(() -> instance.nodetoolResult("listsnapshots", "-e").getStdout().contains(snapshotName));
+    }
+
+    private Path findManifest(String[] dataDirs, String tableId)
+    {
+        for (String dataDir : dataDirs)
+        {
+            Path manifest = Paths.get(dataDir)
+                                 .resolve(KEYSPACE)
+                                 .resolve(format("%s-%s", tableName, tableId))
+                                 .resolve("snapshots")
+                                 .resolve(snapshotName)
+                                 .resolve("manifest.json");
+
+            if (Files.exists(manifest))
+            {
+                return manifest;
+            }
+        }
+
+        throw new IllegalStateException("Unable to find manifest!");
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
index 969f372..c95ba5d 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/InternodeEncryptionEnforcementTest.java
@@ -17,15 +17,29 @@
  */
 package org.apache.cassandra.distributed.test;
 
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.security.KeyStore;
+import java.security.cert.Certificate;
 import java.util.HashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.ImmutableMap;
 import org.junit.Test;
 
+import org.apache.cassandra.auth.AllowAllInternodeAuthenticator;
+import org.apache.cassandra.auth.IInternodeAuthenticator;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.distributed.Cluster;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IIsolatedExecutor.SerializableRunnable;
 import org.apache.cassandra.distributed.shared.NetworkTopology;
+import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.net.InboundMessageHandlers;
 import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.net.OutboundConnections;
@@ -40,6 +54,131 @@
 
 public final class InternodeEncryptionEnforcementTest extends TestBaseImpl
 {
+
+    @Test
+    public void testInboundConnectionsAreRejectedWhenAuthFails() throws IOException, TimeoutException
+    {
+        Cluster.Builder builder = createCluster(RejectInboundConnections.class);
+
+        final ExecutorService executorService = Executors.newSingleThreadExecutor();
+        try (Cluster cluster = builder.start())
+        {
+            executorService.submit(() -> openConnections(cluster));
+
+            /*
+             * instance (1) should not connect to instance (2) as authentication fails;
+             * instance (2) should not connect to instance (1) as authentication fails.
+             */
+            SerializableRunnable runnable = () ->
+            {
+                // There should be no inbound handlers as authentication fails and we remove handlers.
+                assertEquals(0, MessagingService.instance().messageHandlers.values().size());
+
+                // There should be no outbound connections as authentication fails.
+                OutboundConnections outbound = getOnlyElement(MessagingService.instance().channelManagers.values());
+                assertTrue(!outbound.small.isConnected() && !outbound.large.isConnected() && !outbound.urgent.isConnected());
+
+                // Verify that the failure is due to authentication failure
+                final RejectInboundConnections authenticator = (RejectInboundConnections) DatabaseDescriptor.getInternodeAuthenticator();
+                assertTrue(authenticator.authenticationFailed);
+            };
+
+            // Wait for authentication to fail
+            cluster.get(1).logs().watchFor("Unable to authenticate peer");
+            cluster.get(1).runOnInstance(runnable);
+            cluster.get(2).logs().watchFor("Unable to authenticate peer");
+            cluster.get(2).runOnInstance(runnable);
+        }
+        executorService.shutdown();
+    }
+
+    @Test
+    public void testOutboundConnectionsAreRejectedWhenAuthFails() throws IOException, TimeoutException
+    {
+        Cluster.Builder builder = createCluster(RejectOutboundAuthenticator.class);
+
+        final ExecutorService executorService = Executors.newSingleThreadExecutor();
+        try (Cluster cluster = builder.start())
+        {
+            executorService.submit(() -> openConnections(cluster));
+
+            /*
+             * instance (1) should not connect to instance (2) as authentication fails;
+             * instance (2) should not connect to instance (1) as authentication fails.
+             */
+            SerializableRunnable runnable = () ->
+            {
+                // There should be no inbound connections as authentication fails.
+                InboundMessageHandlers inbound = getOnlyElement(MessagingService.instance().messageHandlers.values());
+                assertEquals(0, inbound.count());
+
+                // There should be no outbound connections as authentication fails.
+                OutboundConnections outbound = getOnlyElement(MessagingService.instance().channelManagers.values());
+                assertTrue(!outbound.small.isConnected() && !outbound.large.isConnected() && !outbound.urgent.isConnected());
+
+                // Verify that the failure is due to authentication failure
+                final RejectOutboundAuthenticator authenticator = (RejectOutboundAuthenticator) DatabaseDescriptor.getInternodeAuthenticator();
+                assertTrue(authenticator.authenticationFailed);
+            };
+
+            // Wait for authentication to fail
+            cluster.get(1).logs().watchFor("authentication failed");
+            cluster.get(1).runOnInstance(runnable);
+            cluster.get(2).logs().watchFor("authentication failed");
+            cluster.get(2).runOnInstance(runnable);
+        }
+        executorService.shutdown();
+    }
+
+    @Test
+    public void testOutboundConnectionsAreInterruptedWhenAuthFails() throws IOException, TimeoutException
+    {
+        Cluster.Builder builder = createCluster(AllowFirstAndRejectOtherOutboundAuthenticator.class);
+        try (Cluster cluster = builder.start())
+        {
+            try
+            {
+                openConnections(cluster);
+            }
+            catch (RuntimeException ise)
+            {
+                assertThat(ise.getMessage(), containsString("agreement not reached"));
+            }
+
+            // Verify that authentication failed and that interrupt is called on outbound connections.
+            cluster.get(1).logs().watchFor("authentication failed to");
+            cluster.get(1).logs().watchFor("Interrupted outbound connections to");
+
+            /*
+             * Check if outbound connections are zero
+             */
+            SerializableRunnable runnable = () ->
+            {
+                // Verify that there is only one successful outbound connection
+                final AllowFirstAndRejectOtherOutboundAuthenticator authenticator = (AllowFirstAndRejectOtherOutboundAuthenticator) DatabaseDescriptor.getInternodeAuthenticator();
+                assertEquals(1, authenticator.successfulOutbound.get());
+                assertTrue(authenticator.failedOutbound.get() > 0);
+
+                // There should be no outbound connections as authentication fails.
+                OutboundConnections outbound = getOnlyElement(MessagingService.instance().channelManagers.values());
+                assertTrue(!outbound.small.isConnected() && !outbound.large.isConnected() && !outbound.urgent.isConnected());
+            };
+            cluster.get(1).runOnInstance(runnable);
+        }
+    }
+
+    @Test
+    public void testConnectionsAreAcceptedWhenAuthSucceds() throws IOException
+    {
+        verifyAuthenticationSucceeds(AllowAllInternodeAuthenticator.class);
+    }
+
+    @Test
+    public void testAuthenticationWithCertificateAuthenticator() throws IOException
+    {
+        verifyAuthenticationSucceeds(CertificateVerifyAuthenticator.class);
+    }
+
     @Test
     public void testConnectionsAreRejectedWithInvalidConfig() throws Throwable
     {
@@ -155,4 +294,155 @@
         cluster.schemaChange("CREATE KEYSPACE test_connections_from_2 " +
                              "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};", false, cluster.get(2));
     }
+
+    private void verifyAuthenticationSucceeds(final Class authenticatorClass) throws IOException
+    {
+        Cluster.Builder builder = createCluster(authenticatorClass);
+        try (Cluster cluster = builder.start())
+        {
+            openConnections(cluster);
+
+            /*
+             * instance (1) should connect to instance (2) without any issues;
+             * instance (2) should connect to instance (1) without any issues.
+             */
+
+            SerializableRunnable runnable = () ->
+            {
+                // There should be inbound connections as authentication succeeds.
+                InboundMessageHandlers inbound = getOnlyElement(MessagingService.instance().messageHandlers.values());
+                assertTrue(inbound.count() > 0);
+
+                // There should be outbound connections as authentication succeeds.
+                OutboundConnections outbound = getOnlyElement(MessagingService.instance().channelManagers.values());
+                assertTrue(outbound.small.isConnected() || outbound.large.isConnected() || outbound.urgent.isConnected());
+            };
+
+            cluster.get(1).runOnInstance(runnable);
+            cluster.get(2).runOnInstance(runnable);
+        }
+    }
+
+    private Cluster.Builder createCluster(final Class authenticatorClass)
+    {
+        return builder()
+        .withNodes(2)
+        .withConfig(c ->
+                    {
+                        c.with(Feature.NETWORK);
+                        c.with(Feature.NATIVE_PROTOCOL);
+
+                        HashMap<String, Object> encryption = new HashMap<>();
+                        encryption.put("keystore", "test/conf/cassandra_ssl_test.keystore");
+                        encryption.put("keystore_password", "cassandra");
+                        encryption.put("truststore", "test/conf/cassandra_ssl_test.truststore");
+                        encryption.put("truststore_password", "cassandra");
+                        encryption.put("internode_encryption", "all");
+                        encryption.put("require_client_auth", "true");
+                        c.set("server_encryption_options", encryption);
+                        c.set("internode_authenticator", authenticatorClass.getName());
+                    })
+        .withNodeIdTopology(ImmutableMap.of(1, NetworkTopology.dcAndRack("dc1", "r1a"),
+                                            2, NetworkTopology.dcAndRack("dc2", "r2a")));
+    }
+
+    // Authenticator that validates certificate authentication
+    public static class CertificateVerifyAuthenticator implements IInternodeAuthenticator
+    {
+        @Override
+        public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            if (connectionType == InternodeConnectionDirection.OUTBOUND_PRECONNECT)
+            {
+                return true;
+            }
+            try
+            {
+                // Check if the presented certificates during internode authentication are the ones in the keystores
+                // configured in the cassandra.yaml configuration.
+                KeyStore keyStore = KeyStore.getInstance("JKS");
+                char[] keyStorePassword = "cassandra".toCharArray();
+                InputStream keyStoreData = new FileInputStream("test/conf/cassandra_ssl_test.keystore");
+                keyStore.load(keyStoreData, keyStorePassword);
+                return certificates != null && certificates.length != 0 && keyStore.getCertificate("cassandra_ssl_test").equals(certificates[0]);
+            }
+            catch (Exception e)
+            {
+                return false;
+            }
+        }
+
+        @Override
+        public void validateConfiguration() throws ConfigurationException
+        {
+
+        }
+    }
+
+    public static class RejectConnectionsAuthenticator implements IInternodeAuthenticator
+    {
+        boolean authenticationFailed = false;
+
+        @Override
+        public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            authenticationFailed = true;
+            return false;
+        }
+
+        @Override
+        public void validateConfiguration() throws ConfigurationException
+        {
+
+        }
+    }
+
+    public static class RejectInboundConnections extends RejectConnectionsAuthenticator
+    {
+        @Override
+        public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            if (connectionType == InternodeConnectionDirection.INBOUND)
+            {
+                return super.authenticate(remoteAddress, remotePort, certificates, connectionType);
+            }
+            return true;
+        }
+    }
+
+    public static class RejectOutboundAuthenticator extends RejectConnectionsAuthenticator
+    {
+        @Override
+        public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            if (connectionType == InternodeConnectionDirection.OUTBOUND)
+            {
+                return super.authenticate(remoteAddress, remotePort, certificates, connectionType);
+            }
+            return true;
+        }
+    }
+
+    public static class AllowFirstAndRejectOtherOutboundAuthenticator extends RejectOutboundAuthenticator
+    {
+        AtomicInteger successfulOutbound = new AtomicInteger();
+        AtomicInteger failedOutbound = new AtomicInteger();
+
+        @Override
+        public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            if (connectionType == InternodeConnectionDirection.OUTBOUND)
+            {
+                if(successfulOutbound.get() == 0) {
+                    successfulOutbound.incrementAndGet();
+                    return true;
+                } else {
+                    failedOutbound.incrementAndGet();
+                    return false;
+                }
+
+            }
+            return true;
+        }
+    }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/test/QueriesTableTest.java b/test/distributed/org/apache/cassandra/distributed/test/QueriesTableTest.java
new file mode 100644
index 0000000..09e56e0
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/QueriesTableTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.Test;
+
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
+import org.apache.cassandra.distributed.api.Row;
+import org.apache.cassandra.distributed.api.SimpleQueryResult;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public class QueriesTableTest extends TestBaseImpl
+{
+    public static final int ITERATIONS = 256;
+
+    @Test
+    public void shouldExposeReadsAndWrites() throws Throwable
+    {
+        try (Cluster cluster = init(Cluster.build(1).start()))
+        {
+            ExecutorService executor = Executors.newFixedThreadPool(16);
+            
+            cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (k int primary key, v int)");
+
+            AtomicInteger reads = new AtomicInteger(0);
+            AtomicInteger writes = new AtomicInteger(0);
+            AtomicInteger paxos = new AtomicInteger(0);
+            
+            for (int i = 0; i < ITERATIONS; i++)
+            {
+                int k = i;
+                executor.execute(() -> cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (k, v) VALUES (" + k + ", 0)", ConsistencyLevel.ALL));
+                executor.execute(() -> cluster.coordinator(1).execute("UPDATE " + KEYSPACE + ".tbl SET v = 10 WHERE k = " + (k - 1) + " IF v = 0", ConsistencyLevel.ALL));
+                executor.execute(() -> cluster.coordinator(1).execute("SELECT * FROM " + KEYSPACE + ".tbl WHERE k = " + (k - 1), ConsistencyLevel.ALL));
+
+                executor.execute(() ->
+                {
+                    SimpleQueryResult result = cluster.get(1).executeInternalWithResult("SELECT * FROM system_views.queries");
+                    
+                    while (result.hasNext())
+                    {
+                        Row row = result.next();
+                        String threadId = row.get("thread_id").toString();
+                        String task = row.get("task").toString();
+
+                        if (threadId.contains("Read") && task.contains("SELECT"))
+                            reads.incrementAndGet();
+                        else if (threadId.contains("Mutation") && task.contains("Mutation"))
+                            writes.incrementAndGet();
+                        else if (threadId.contains("Mutation") && task.contains("Paxos"))
+                            paxos.incrementAndGet();
+                    }
+                });
+            }
+
+            executor.shutdown();
+            assertTrue(executor.awaitTermination(1, TimeUnit.MINUTES));
+            
+            // We should see at least one read, write, and conditional update in the "queries" table.
+            assertThat(reads.get()).isGreaterThan(0).isLessThanOrEqualTo(ITERATIONS);
+            assertThat(writes.get()).isGreaterThan(0).isLessThanOrEqualTo(ITERATIONS);
+            assertThat(paxos.get()).isGreaterThan(0).isLessThanOrEqualTo(ITERATIONS);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/cdc/ToggleCDCOnRepairEnabledTest.java b/test/distributed/org/apache/cassandra/distributed/test/cdc/ToggleCDCOnRepairEnabledTest.java
new file mode 100644
index 0000000..499cf07
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/cdc/ToggleCDCOnRepairEnabledTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.cdc;
+
+import java.util.function.Consumer;
+
+import org.junit.Test;
+
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.commitlog.CommitLogSegment;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertTrue;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+
+public class ToggleCDCOnRepairEnabledTest extends TestBaseImpl
+{
+    @Test
+    public void testCDCOnRepairIsEnabled() throws Exception
+    {
+        testCDCOnRepairEnabled(true, cluster -> {
+            cluster.get(2).runOnInstance(() -> {
+                boolean containCDCInLog = CommitLog.instance.segmentManager
+                                              .getActiveSegments()
+                                              .stream()
+                                              .anyMatch(s -> s.getCDCState() == CommitLogSegment.CDCState.CONTAINS);
+                assertTrue("Mutation should be added to commit log when cdc_on_repair_enabled is true",
+                           containCDCInLog);
+            });
+        });
+    }
+
+    @Test
+    public void testCDCOnRepairIsDisabled() throws Exception
+    {
+        testCDCOnRepairEnabled(false, cluster -> {
+            cluster.get(2).runOnInstance(() -> {
+                boolean containCDCInLog = CommitLog.instance.segmentManager
+                                              .getActiveSegments()
+                                              .stream()
+                                              .allMatch(s -> s.getCDCState() != CommitLogSegment.CDCState.CONTAINS);
+                assertTrue("No mutation should be added to commit log when cdc_on_repair_enabled is false",
+                           containCDCInLog);
+            });
+        });
+    }
+
+    // test helper to repair data between nodes when cdc_on_repair_enabled is on or off.
+    private void testCDCOnRepairEnabled(boolean enabled, Consumer<Cluster> assertion) throws Exception
+    {
+        try (Cluster cluster = init(Cluster.build(2)
+                                           .withConfig(c -> c.set("cdc_enabled", true)
+                                                             .set("cdc_on_repair_enabled", enabled)
+                                                             .with(Feature.NETWORK)
+                                                             .with(Feature.GOSSIP))
+                                           .start()))
+        {
+            cluster.schemaChange(withKeyspace("CREATE TABLE %s.tbl (k INT PRIMARY KEY, v INT) WITH cdc=true"));
+
+            // Data only in node1
+            cluster.get(1).executeInternal(withKeyspace("INSERT INTO %s.tbl (k, v) VALUES (1, 1)"));
+            Object[][] result = cluster.get(1).executeInternal(withKeyspace("SELECT * FROM %s.tbl WHERE k = 1"));
+            assertRows(result, row(1, 1));
+            result = cluster.get(2).executeInternal(withKeyspace("SELECT * FROM %s.tbl WHERE k = 1"));
+            assertRows(result);
+
+            // repair
+            cluster.get(1).flush(KEYSPACE);
+            cluster.get(2).nodetool("repair", KEYSPACE, "tbl");
+
+            // verify node2 now has data
+            result = cluster.get(2).executeInternal(withKeyspace("SELECT * FROM %s.tbl WHERE k = 1"));
+            assertRows(result, row(1, 1));
+
+            assertion.accept(cluster);
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/FailedBootstrapTest.java b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/FailedBootstrapTest.java
new file mode 100644
index 0000000..56de092
--- /dev/null
+++ b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/FailedBootstrapTest.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.distributed.test.hostreplacement;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.SuperCall;
+import net.bytebuddy.implementation.bind.annotation.This;
+import org.apache.cassandra.auth.CassandraRoleManager;
+import org.apache.cassandra.distributed.Cluster;
+import org.apache.cassandra.distributed.api.Feature;
+import org.apache.cassandra.distributed.api.IInvokableInstance;
+import org.apache.cassandra.distributed.api.NodeToolResult;
+import org.apache.cassandra.distributed.api.TokenSupplier;
+import org.apache.cassandra.distributed.test.TestBaseImpl;
+import org.apache.cassandra.metrics.ClientRequestsMetricsHolder;
+import org.apache.cassandra.streaming.StreamException;
+import org.apache.cassandra.streaming.StreamResultFuture;
+import org.assertj.core.api.Assertions;
+import org.awaitility.Awaitility;
+
+import static net.bytebuddy.matcher.ElementMatchers.named;
+import static org.apache.cassandra.distributed.shared.ClusterUtils.replaceHostAndStart;
+import static org.apache.cassandra.distributed.shared.ClusterUtils.stopUnchecked;
+import static org.apache.cassandra.distributed.test.hostreplacement.HostReplacementTest.setupCluster;
+
+public class FailedBootstrapTest extends TestBaseImpl
+{
+    private static final Logger logger = LoggerFactory.getLogger(FailedBootstrapTest.class);
+
+    private static final int NODE_TO_REMOVE = 2;
+
+    @Test
+    public void roleSetupDoesNotProduceUnavailables() throws IOException
+    {
+        Cluster.Builder builder = Cluster.build(3)
+                                         .withConfig(c -> c.with(Feature.values()))
+                                         .withInstanceInitializer(BB::install);
+        TokenSupplier even = TokenSupplier.evenlyDistributedTokens(3, builder.getTokenCount());
+        builder = builder.withTokenSupplier((TokenSupplier) node -> even.tokens(node == 4 ? NODE_TO_REMOVE : node));
+        try (Cluster cluster = builder.start())
+        {
+            List<IInvokableInstance> alive = Arrays.asList(cluster.get(1), cluster.get(3));
+            IInvokableInstance nodeToRemove = cluster.get(NODE_TO_REMOVE);
+
+            setupCluster(cluster);
+
+            stopUnchecked(nodeToRemove);
+
+            // should fail to join, but should start up!
+            IInvokableInstance added = replaceHostAndStart(cluster, nodeToRemove, p -> p.setProperty("cassandra.superuser_setup_delay_ms", "1"));
+            // log gossip for debugging
+            alive.forEach(i -> {
+                NodeToolResult result = i.nodetoolResult("gossipinfo");
+                result.asserts().success();
+                logger.info("gossipinfo for node{}\n{}", i.config().num(), result.getStdout());
+            });
+
+            // CassandraRoleManager attempted to do distributed reads while bootstrap was still going (it failed, so still in bootstrap mode)
+            // so need to validate that is no longer happening and we incrementing org.apache.cassandra.metrics.ClientRequestMetrics.unavailables
+            // sleep larger than multiple retry attempts...
+            Awaitility.await()
+                      .atMost(1, TimeUnit.MINUTES)
+                      .until(() -> added.callOnInstance(() -> BB.SETUP_SCHEDULE_COUNTER.get()) >= 42); // why 42?  just need something large enough to make sure multiple attempts happened
+
+            // do we have any read metrics have unavailables?
+            added.runOnInstance(() -> {
+                Assertions.assertThat(ClientRequestsMetricsHolder.readMetrics.unavailables.getCount()).describedAs("read unavailables").isEqualTo(0);
+                Assertions.assertThat(ClientRequestsMetricsHolder.casReadMetrics.unavailables.getCount()).describedAs("CAS read unavailables").isEqualTo(0);
+            });
+        }
+    }
+
+    public static class BB
+    {
+        public static void install(ClassLoader classLoader, Integer num)
+        {
+            if (num != 4)
+                return;
+
+            new ByteBuddy().rebase(StreamResultFuture.class)
+                           .method(named("maybeComplete"))
+                           .intercept(MethodDelegation.to(BB.class))
+                           .make()
+                           .load(classLoader, ClassLoadingStrategy.Default.INJECTION);
+
+            new ByteBuddy().rebase(CassandraRoleManager.class)
+                           .method(named("scheduleSetupTask"))
+                           .intercept(MethodDelegation.to(BB.class))
+                           .make()
+                           .load(classLoader, ClassLoadingStrategy.Default.INJECTION);
+        }
+
+        public static void maybeComplete(@This StreamResultFuture future) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException
+        {
+            Method method = future.getClass().getSuperclass().getSuperclass().getDeclaredMethod("tryFailure", Throwable.class);
+            method.setAccessible(true);
+            method.invoke(future, new StreamException(future.getCurrentState(), "Stream failed"));
+        }
+
+        private static final AtomicInteger SETUP_SCHEDULE_COUNTER = new AtomicInteger(0);
+        public static void scheduleSetupTask(final Callable<?> setupTask, @SuperCall Runnable fn)
+        {
+            SETUP_SCHEDULE_COUNTER.incrementAndGet();
+            fn.run();
+        }
+    }
+}
diff --git a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java
index 3de0bf5..8219d43 100644
--- a/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/test/hostreplacement/HostReplacementTest.java
@@ -35,6 +35,7 @@
 import org.apache.cassandra.distributed.api.SimpleQueryResult;
 import org.apache.cassandra.distributed.api.TokenSupplier;
 import org.apache.cassandra.distributed.shared.AssertUtils;
+import org.apache.cassandra.distributed.shared.ClusterUtils;
 import org.apache.cassandra.distributed.test.TestBaseImpl;
 import org.assertj.core.api.Assertions;
 
@@ -210,6 +211,8 @@
         fixDistributedSchemas(cluster);
         init(cluster);
 
+        ClusterUtils.awaitGossipSchemaMatch(cluster);
+
         populate(cluster);
         cluster.forEach(i -> i.flush(KEYSPACE));
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
index db5e2e1..7ba12ff 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/BatchUpgradeTest.java
@@ -33,7 +33,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(2)
-        .upgradesFrom(v40).setup((cluster) -> {
+        .upgradesToCurrentFrom(v40).setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".users (" +
                                  "userid uuid PRIMARY KEY," +
                                  "firstname ascii," +
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageColumnDeleteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageColumnDeleteTest.java
index 720a1b5..920c850 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageColumnDeleteTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageColumnDeleteTest.java
@@ -33,7 +33,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(2)
-        .upgradesFrom(v30)
+        .upgradesToCurrentFrom(v30)
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE");
         })
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageHiddenColumnTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageHiddenColumnTest.java
index 4e5236c..178d328 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageHiddenColumnTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageHiddenColumnTest.java
@@ -33,7 +33,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(2)
-        .upgradesFrom(v30)
+        .upgradesToCurrentFrom(v30)
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE");
         })
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageImplicitNullInClusteringTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageImplicitNullInClusteringTest.java
index 9d4824a..b59fda3 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageImplicitNullInClusteringTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStorageImplicitNullInClusteringTest.java
@@ -33,7 +33,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(2)
-        .upgradesFrom(v30)
+        .upgradesToCurrentFrom(v30)
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck1 int, ck2 int, v int, PRIMARY KEY (pk, ck1, ck2)) WITH COMPACT STORAGE");
         })
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingTest.java
index 307d6dd..62d6ea0 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/CompactStoragePagingTest.java
@@ -33,7 +33,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(2)
-        .upgradesFrom(v30)
+        .upgradesToCurrentFrom(v30)
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE");
             for (int i = 1; i < 10; i++)
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageNullClusteringValuesTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageNullClusteringValuesTest.java
index 1657765..2e5578d 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageNullClusteringValuesTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageNullClusteringValuesTest.java
@@ -33,7 +33,7 @@
     public void testNullClusteringValues() throws Throwable
     {
         new TestCase().nodes(1)
-                      .upgradesFrom(v30)
+                      .upgradesToCurrentFrom(v30)
                       .withConfig(config -> config.with(GOSSIP, NETWORK, NATIVE_PROTOCOL).set("enable_drop_compact_storage", true))
                       .setup(cluster -> {
                           String create = "CREATE TABLE %s.%s(k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2)) " +
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
index c645085..9846264 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/DropCompactStorageTest.java
@@ -35,7 +35,7 @@
         new TestCase()
         .nodes(2)
         .nodesToUpgrade(1, 2)
-        .upgradesFrom(v30)
+        .upgradesToCurrentFrom(v30)
         .withConfig(config -> config.with(GOSSIP, NETWORK).set("enable_drop_compact_storage", true))
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, PRIMARY KEY (pk, ck)) WITH COMPACT STORAGE");
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
index 634c886..b2971cd 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/GroupByTest.java
@@ -21,7 +21,6 @@
 import org.junit.Test;
 
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
-import org.apache.cassandra.distributed.shared.Versions;
 
 import static org.apache.cassandra.distributed.api.Feature.GOSSIP;
 import static org.apache.cassandra.distributed.api.Feature.NETWORK;
@@ -36,7 +35,7 @@
         // CASSANDRA-16582: group-by across mixed version cluster would fail with ArrayIndexOutOfBoundException
         new UpgradeTestBase.TestCase()
         .nodes(2)
-        .upgradesFrom(v3X)
+        .upgradesToCurrentFrom(v3X)
         .nodesToUpgrade(1)
         .withConfig(config -> config.with(GOSSIP, NETWORK))
         .setup(cluster -> {
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
index 4e50eb1..1ca23ae 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityTestBase.java
@@ -74,6 +74,14 @@
         testAvailability(true, initial, writeConsistencyLevel, readConsistencyLevel);
     }
 
+    protected static void testAvailability(Semver initial,
+                                           ConsistencyLevel writeConsistencyLevel,
+                                           ConsistencyLevel readConsistencyLevel) throws Throwable
+    {
+        testAvailability(true, initial, writeConsistencyLevel, readConsistencyLevel);
+        testAvailability(false, initial, writeConsistencyLevel, readConsistencyLevel);
+    }
+
     private static void testAvailability(boolean upgradedCoordinator,
                                          Semver initial,
                                          ConsistencyLevel writeConsistencyLevel,
@@ -82,7 +90,7 @@
         new TestCase()
         .nodes(NUM_NODES)
         .nodesToUpgrade(upgradedCoordinator ? 1 : 2)
-        .upgrades(initial, UpgradeTestBase.CURRENT)
+        .upgradesToCurrentFrom(initial)
         .withConfig(config -> config.set("read_request_timeout_in_ms", SECONDS.toMillis(5))
                                     .set("write_request_timeout_in_ms", SECONDS.toMillis(5)))
         // use retry of 10ms so that each check is consistent
@@ -93,6 +101,7 @@
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k uuid, c int, v int, PRIMARY KEY (k, c)) WITH speculative_retry = '10ms'"));
             cluster.setUncaughtExceptionsFilter(throwable -> throwable instanceof RejectedExecutionException);
         })
+        .runBeforeClusterUpgrade(cluster -> cluster.filters().reset())
         .runAfterNodeUpgrade((cluster, n) -> {
 
             ICoordinator coordinator = cluster.coordinator(COORDINATOR);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java
index 8ea94ea..59554d1 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeAvailabilityV3XOneAllTest.java
@@ -25,7 +25,7 @@
  */
 public class MixedModeAvailabilityV3XOneAllTest extends MixedModeAvailabilityTestBase
 {
-    public MixedModeAvailabilityV3XOneAllTest() throws Throwable
+    public MixedModeAvailabilityV3XOneAllTest()
     {
         super(v3X, ConsistencyLevel.ONE, ConsistencyLevel.ALL);
     }
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyTestBase.java
index f98fc8a..519625e 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeConsistencyTestBase.java
@@ -42,11 +42,6 @@
 {
     protected static void testConsistency(Semver initial) throws Throwable
     {
-        testConsistency(initial, UpgradeTestBase.CURRENT);
-    }
-
-    protected static void testConsistency(Semver initial, Semver upgrade) throws Throwable
-    {
         List<Tester> testers = new ArrayList<>();
         testers.addAll(Tester.create(1, ALL));
         testers.addAll(Tester.create(2, ALL, QUORUM));
@@ -55,7 +50,7 @@
         new TestCase()
         .nodes(3)
         .nodesToUpgrade(1)
-        .upgrades(initial, upgrade)
+        .upgradesToCurrentFrom(initial)
         .withConfig(config -> config.set("read_request_timeout_in_ms", SECONDS.toMillis(30))
                                     .set("write_request_timeout_in_ms", SECONDS.toMillis(30)))
         .setup(cluster -> {
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
index a38e25d..69d3dbe 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeFrom3ReplicationTest.java
@@ -18,21 +18,65 @@
 
 package org.apache.cassandra.distributed.upgrade;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.junit.Test;
 
-import org.apache.cassandra.distributed.shared.Versions;
+import org.apache.cassandra.distributed.api.ConsistencyLevel;
 
-public class MixedModeFrom3ReplicationTest extends MixedModeReplicationTestBase
+import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
+import static org.apache.cassandra.distributed.shared.AssertUtils.row;
+
+public class MixedModeFrom3ReplicationTest extends UpgradeTestBase
 {
     @Test
-    public void testSimpleStrategy30to3X() throws Throwable
-    {
-        testSimpleStrategy(v30, v3X);
-    }
-
-    @Test
     public void testSimpleStrategy() throws Throwable
     {
-        testSimpleStrategy(v30);
+        String insert = "INSERT INTO test_simple.names (key, name) VALUES (?, ?)";
+        String select = "SELECT * FROM test_simple.names WHERE key = ?";
+
+        new TestCase()
+        .nodes(3)
+        .nodesToUpgrade(1, 2)
+        .upgradesToCurrentFrom(v30)
+        .setup(cluster -> {
+            cluster.schemaChange("CREATE KEYSPACE test_simple WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};");
+            cluster.schemaChange("CREATE TABLE test_simple.names (key int PRIMARY KEY, name text)");
+        })
+        .runAfterNodeUpgrade((cluster, upgraded) -> {
+            List<Long> initialTokens = new ArrayList<>(cluster.size() + 1);
+            initialTokens.add(null); // The first valid token is at 1 to avoid offset math below.
+
+            for (int i = 1; i <= cluster.size(); i++)
+                initialTokens.add(Long.valueOf(cluster.get(i).config().get("initial_token").toString()));
+
+            List<Long> validTokens = initialTokens.subList(1, cluster.size() + 1);
+
+            // Exercise all the coordinators...
+            for (int i = 1; i <= cluster.size(); i++)
+            {
+                // ...and sample enough keys that we cover the ring.
+                for (int j = 0; j < 10; j++)
+                {
+                    int key = j + (i * 10);
+                    Object[] row = row(key, "Nero");
+                    Long token = tokenFrom(key);
+
+                    cluster.coordinator(i).execute(insert, ConsistencyLevel.ALL, row);
+
+                    int node = primaryReplica(validTokens, token);
+                    assertRows(cluster.get(node).executeInternal(select, key), row);
+
+                    node = nextNode(node, cluster.size());
+                    assertRows(cluster.get(node).executeInternal(select, key), row);
+
+                    // At RF=2, this node should not have received the write.
+                    node = nextNode(node, cluster.size());
+                    assertRows(cluster.get(node).executeInternal(select, key));
+                }
+            }
+        })
+        .run();
     }
 }
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
index 35c4fb3..e1a96ac 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeGossipTest.java
@@ -33,7 +33,6 @@
 import org.apache.cassandra.distributed.UpgradeableCluster;
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IMessageFilters;
-import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.net.Verb;
 import org.assertj.core.api.Assertions;
 
@@ -51,8 +50,8 @@
         .nodes(3)
         .nodesToUpgradeOrdered(1, 2, 3)
         // all upgrades from v30 up, excluding v30->v3X and from v40
-        .singleUpgrade(v30)
-        .singleUpgrade(v3X)
+        .singleUpgradeToCurrentFrom(v30)
+        .singleUpgradeToCurrentFrom(v3X)
         .setup(c -> {})
         .runAfterNodeUpgrade((cluster, node) -> {
             if (node == 1) {
@@ -87,8 +86,8 @@
         .nodes(3)
         .nodesToUpgradeOrdered(1, 2, 3)
         // all upgrades from v30 up, excluding v30->v3X and from v40
-        .singleUpgrade(v30)
-        .singleUpgrade(v3X)
+        .singleUpgradeToCurrentFrom(v30)
+        .singleUpgradeToCurrentFrom(v3X)
         .setup(cluster -> {
             // node2 and node3 gossiper cannot talk with each other
             cluster.filters().verbs(Verb.GOSSIP_DIGEST_SYN.id).from(2).to(3).drop();
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
index 935cc8e..c2c4b88 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeMessageForwardTest.java
@@ -82,7 +82,7 @@
         .withConfig(c -> c.with(Feature.GOSSIP, Feature.NETWORK).set("request_timeout_in_ms", 30000))
         .withBuilder(b -> b.withRacks(numDCs, 1, nodesPerDc))
         .nodes(numDCs * nodesPerDc)
-        .singleUpgrade(v30)
+        .singleUpgradeToCurrentFrom(v30)
         .setup(cluster -> {
             cluster.schemaChange("ALTER KEYSPACE " + KEYSPACE +
                 " WITH replication = {'class': 'NetworkTopologyStrategy', " + ntsArgs + " };");
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairDeleteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairDeleteTest.java
index 01955c5..e603778 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairDeleteTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairDeleteTest.java
@@ -46,7 +46,8 @@
         allUpgrades(2, 1)
         .setup(cluster -> {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, v int, s int static, PRIMARY KEY (k, c))"));
-
+        })
+        .runBeforeClusterUpgrade(cluster -> {
             // insert the rows in all the nodes
             String insert = withKeyspace("INSERT INTO %s.t (k, c, v, s) VALUES (?, ?, ?, ?)");
             cluster.coordinator(1).execute(insert, ConsistencyLevel.ALL, row1);
@@ -85,7 +86,8 @@
         allUpgrades(2, 1)
         .setup(cluster -> {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, v int, s int static, PRIMARY KEY (k, c))"));
-
+        })
+        .runBeforeClusterUpgrade(cluster -> {
             // insert half partition in each node
             String insert = withKeyspace("INSERT INTO %s.t (k, c, v, s) VALUES (?, ?, ?, ?)");
             cluster.coordinator(1).execute(insert, ConsistencyLevel.ALL, partition1[0]);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
index fcb0482..4966e5c 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java
@@ -45,6 +45,7 @@
 
         allUpgrades(2, 1)
         .setup(c -> c.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, v int, PRIMARY KEY (k, c))")))
+        .runBeforeClusterUpgrade(cluster -> cluster.coordinator(1).execute(withKeyspace("TRUNCATE %s.t"), ConsistencyLevel.ALL))
         .runAfterClusterUpgrade(cluster -> {
 
             // insert rows internally in each node
@@ -77,7 +78,8 @@
         allUpgrades(2, 1)
         .setup(cluster -> {
             cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (k int, c int, v int, PRIMARY KEY (k, c))"));
-
+        })
+        .runBeforeClusterUpgrade(cluster -> {
             // insert the initial version of the rows in all the nodes
             String insert = withKeyspace("INSERT INTO %s.t (k, c, v) VALUES (?, ?, ?)");
             cluster.coordinator(1).execute(insert, ConsistencyLevel.ALL, row1);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
index b11678d..a039678 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadTest.java
@@ -22,7 +22,6 @@
 
 import org.apache.cassandra.distributed.api.Feature;
 import org.apache.cassandra.distributed.api.IInvokableInstance;
-import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.gms.Gossiper;
 import org.apache.cassandra.utils.CassandraVersion;
 
@@ -40,8 +39,8 @@
         .nodes(2)
         .nodesToUpgrade(1)
         // all upgrades from v30 up, excluding v30->v3X and from v40
-        .singleUpgrade(v30)
-        .singleUpgrade(v3X)
+        .singleUpgradeToCurrentFrom(v30)
+        .singleUpgradeToCurrentFrom(v3X)
         .setup(cluster -> {
             cluster.schemaChange(CREATE_TABLE);
             insertData(cluster.coordinator(1));
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
index 813d9f2..6606bcd 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeRepairTest.java
@@ -54,7 +54,7 @@
         new UpgradeTestBase.TestCase()
         .nodes(2)
         .nodesToUpgrade(UPGRADED_NODE)
-        .singleUpgrade(v3X)
+        .singleUpgradeToCurrentFrom(v3X)
         .withConfig(config -> config.with(NETWORK, GOSSIP))
         .setup(cluster -> {
             cluster.schemaChange(CREATE_TABLE);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReplicationTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReplicationTestBase.java
deleted file mode 100644
index 3f2da7a..0000000
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReplicationTestBase.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.cassandra.distributed.upgrade;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.vdurmont.semver4j.Semver;
-
-import org.apache.cassandra.distributed.api.ConsistencyLevel;
-
-import static org.apache.cassandra.distributed.shared.AssertUtils.assertRows;
-import static org.apache.cassandra.distributed.shared.AssertUtils.row;
-
-/**
- * A base class for testing basic replication on mixed-version clusters.
- */
-public class MixedModeReplicationTestBase extends UpgradeTestBase
-{
-    protected void testSimpleStrategy(Semver from) throws Throwable
-    {
-        testSimpleStrategy(from, UpgradeTestBase.CURRENT);
-    }
-
-    protected void testSimpleStrategy(Semver from, Semver to) throws Throwable
-    {
-        String insert = "INSERT INTO test_simple.names (key, name) VALUES (?, ?)";
-        String select = "SELECT * FROM test_simple.names WHERE key = ?";
-
-        new TestCase()
-        .nodes(3)
-        .nodesToUpgrade(1, 2)
-        .upgrades(from, to)
-        .setup(cluster -> {
-            cluster.schemaChange("CREATE KEYSPACE test_simple WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 2};");
-            cluster.schemaChange("CREATE TABLE test_simple.names (key int PRIMARY KEY, name text)");
-        })
-        .runAfterNodeUpgrade((cluster, upgraded) -> {
-            List<Long> initialTokens = new ArrayList<>(cluster.size() + 1);
-            initialTokens.add(null); // The first valid token is at 1 to avoid offset math below.
-
-            for (int i = 1; i <= cluster.size(); i++)
-                initialTokens.add(Long.valueOf(cluster.get(i).config().get("initial_token").toString()));
-
-            List<Long> validTokens = initialTokens.subList(1, cluster.size() + 1);
-
-            // Exercise all the coordinators...
-            for (int i = 1; i <= cluster.size(); i++)
-            {
-                // ...and sample enough keys that we cover the ring.
-                for (int j = 0; j < 10; j++)
-                {
-                    int key = j + (i * 10);
-                    Object[] row = row(key, "Nero");
-                    Long token = tokenFrom(key);
-
-                    cluster.coordinator(i).execute(insert, ConsistencyLevel.ALL, row);
-
-                    int node = primaryReplica(validTokens, token);
-                    assertRows(cluster.get(node).executeInternal(select, key), row);
-
-                    node = nextNode(node, cluster.size());
-                    assertRows(cluster.get(node).executeInternal(select, key), row);
-
-                    // At RF=2, this node should not have received the write.
-                    node = nextNode(node, cluster.size());
-                    assertRows(cluster.get(node).executeInternal(select, key));
-                }
-            }
-        })
-        .run();
-    }
-}
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
index 4cca7b9..59e624d 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/Pre40MessageFilterTest.java
@@ -35,8 +35,8 @@
         .withConfig(configConsumer)
         .nodesToUpgrade(1)
         // all upgrades from v30 up, excluding v30->v3X
-        .singleUpgrade(v30)
-        .upgradesFrom(v3X)
+        .singleUpgradeToCurrentFrom(v30)
+        .upgradesToCurrentFrom(v3X)
         .setup((cluster) -> {
             cluster.filters().outbound().allVerbs().messagesMatching((f,t,m) -> false).drop();
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
index 691d8af..55e1f1e 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTest.java
@@ -22,7 +22,7 @@
 
 import org.apache.cassandra.distributed.api.ConsistencyLevel;
 import org.apache.cassandra.distributed.api.Feature;
-import org.apache.cassandra.distributed.shared.Versions;
+
 import static org.apache.cassandra.distributed.shared.AssertUtils.*;
 
 public class UpgradeTest extends UpgradeTestBase
@@ -34,7 +34,7 @@
         .nodes(2)
         .nodesToUpgrade(1)
         .withConfig((cfg) -> cfg.with(Feature.NETWORK, Feature.GOSSIP))
-        .upgradesFrom(v3X)
+        .upgradesToCurrentFrom(v3X)
         .setup((cluster) -> {
             cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (pk int, ck int, v int, PRIMARY KEY (pk, ck))");
             cluster.coordinator(1).execute("INSERT INTO " + KEYSPACE + ".tbl (pk, ck, v) VALUES (1, 1, 1)", ConsistencyLevel.ALL);
diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
index 5c32fcd..e41444f 100644
--- a/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
+++ b/test/distributed/org/apache/cassandra/distributed/upgrade/UpgradeTestBase.java
@@ -19,17 +19,22 @@
 package org.apache.cassandra.distributed.upgrade;
 
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.NavigableSet;
+import java.util.Objects;
 import java.util.Set;
 import java.util.function.Consumer;
+import java.util.stream.Collectors;
 
-import com.google.common.collect.ImmutableList;
 import com.vdurmont.semver4j.Semver;
 import com.vdurmont.semver4j.Semver.SemverType;
 
 import org.junit.After;
+import org.junit.Assume;
 import org.junit.BeforeClass;
 
 import org.slf4j.Logger;
@@ -44,10 +49,11 @@
 import org.apache.cassandra.distributed.shared.ThrowingRunnable;
 import org.apache.cassandra.distributed.shared.Versions;
 import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.SimpleGraph;
 
 import static org.apache.cassandra.distributed.shared.Versions.Version;
 import static org.apache.cassandra.distributed.shared.Versions.find;
+import static org.apache.cassandra.utils.SimpleGraph.sortedVertices;
 
 public class UpgradeTestBase extends DistributedTestBase
 {
@@ -56,8 +62,7 @@
     @After
     public void afterEach()
     {
-        System.runFinalization();
-        System.gc();
+        triggerGC();
     }
 
     @BeforeClass
@@ -86,27 +91,60 @@
     public static final Semver v3X = new Semver("3.11.0", SemverType.LOOSE);
     public static final Semver v40 = new Semver("4.0-alpha1", SemverType.LOOSE);
     public static final Semver v41 = new Semver("4.1-alpha1", SemverType.LOOSE);
+    public static final Semver v42 = new Semver("4.2-alpha1", SemverType.LOOSE);
 
-    protected static final List<Pair<Semver,Semver>> SUPPORTED_UPGRADE_PATHS = ImmutableList.of(
-        Pair.create(v30, v3X),
-        Pair.create(v30, v40),
-        Pair.create(v30, v41),
-        Pair.create(v3X, v40),
-        Pair.create(v3X, v41),
-        Pair.create(v40, v41));
+    protected static final SimpleGraph<Semver> SUPPORTED_UPGRADE_PATHS = new SimpleGraph.Builder<Semver>()
+                                                                         .addEdge(v30, v3X)
+                                                                         .addEdge(v30, v40)
+                                                                         .addEdge(v30, v41)
+                                                                         .addEdge(v30, v42)
+                                                                         .addEdge(v3X, v40)
+                                                                         .addEdge(v3X, v41)
+                                                                         .addEdge(v3X, v42)
+                                                                         .addEdge(v40, v41)
+                                                                         .addEdge(v40, v42)
+                                                                         .addEdge(v41, v42)
+                                                                         .build();
 
     // the last is always the current
-    public static final Semver CURRENT = SUPPORTED_UPGRADE_PATHS.get(SUPPORTED_UPGRADE_PATHS.size() - 1).right;
+    public static final Semver CURRENT = SimpleGraph.max(SUPPORTED_UPGRADE_PATHS);
+    public static final Semver OLDEST = SimpleGraph.min(SUPPORTED_UPGRADE_PATHS);
 
     public static class TestVersions
     {
         final Version initial;
-        final Version upgrade;
+        final List<Version> upgrade;
+        final List<Semver> upgradeVersions;
 
-        public TestVersions(Version initial, Version upgrade)
+        public TestVersions(Version initial, List<Version> upgrade)
         {
             this.initial = initial;
             this.upgrade = upgrade;
+            this.upgradeVersions = upgrade.stream().map(v -> v.version).collect(Collectors.toList());
+        }
+
+        @Override
+        public boolean equals(Object o)
+        {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            TestVersions that = (TestVersions) o;
+            return Objects.equals(initial.version, that.initial.version) && Objects.equals(upgradeVersions, that.upgradeVersions);
+        }
+
+        @Override
+        public int hashCode()
+        {
+            return Objects.hash(initial.version, upgradeVersions);
+        }
+
+        @Override
+        public String toString()
+        {
+            StringBuilder sb = new StringBuilder();
+            sb.append(initial.version).append(" -> ");
+            sb.append(upgradeVersions);
+            return sb.toString();
         }
     }
 
@@ -118,6 +156,7 @@
         private RunOnCluster setup;
         private RunOnClusterAndNode runBeforeNodeRestart;
         private RunOnClusterAndNode runAfterNodeUpgrade;
+        private RunOnCluster runBeforeClusterUpgrade;
         private RunOnCluster runAfterClusterUpgrade;
         private final Set<Integer> nodesToUpgrade = new LinkedHashSet<>();
         private Consumer<IInstanceConfig> configConsumer;
@@ -140,28 +179,77 @@
         }
 
         /** performs all supported upgrade paths that exist in between from and CURRENT (inclusive) **/
-        public TestCase upgradesFrom(Semver from)
+        public TestCase upgradesToCurrentFrom(Semver from)
         {
-            return upgrades(from, CURRENT);
+            return upgradesTo(from, CURRENT);
         }
 
-        /** performs all supported upgrade paths that exist in between from and to (inclusive) **/
-        public TestCase upgrades(Semver from, Semver to)
+        /**
+         * performs all supported upgrade paths to the "to" target; example
+         * {@code upgradesTo(3.0, 4.0); // produces: 3.0 -> 4.0, 3.11 -> 4.0}
+         */
+        public TestCase upgradesTo(Semver from, Semver to)
         {
-            SUPPORTED_UPGRADE_PATHS.stream()
-                .filter(upgradePath -> (upgradePath.left.compareTo(from) >= 0 && upgradePath.right.compareTo(to) <= 0))
-                .forEachOrdered(upgradePath ->
-                {
-                    this.upgrade.add(
-                            new TestVersions(versions.getLatest(upgradePath.left), versions.getLatest(upgradePath.right)));
-                });
+            List<TestVersions> upgrade = new ArrayList<>();
+            NavigableSet<Semver> vertices = sortedVertices(SUPPORTED_UPGRADE_PATHS);
+            for (Semver start : vertices.subSet(from, true, to, false))
+            {
+                // only include pairs that are allowed
+                if (SUPPORTED_UPGRADE_PATHS.hasEdge(start, to))
+                    upgrade.add(new TestVersions(versions.getLatest(start), Collections.singletonList(versions.getLatest(to))));
+            }
+            logger.info("Adding upgrades of\n{}", upgrade.stream().map(TestVersions::toString).collect(Collectors.joining("\n")));
+            this.upgrade.addAll(upgrade);
             return this;
         }
 
-        /** Will test this specific upgrade path **/
-        public TestCase singleUpgrade(Semver from)
+        /**
+         * performs all supported upgrade paths from the "from" target; example
+         * {@code upgradesFrom(4.0, 4.2); // produces: 4.0 -> 4.1, 4.0 -> 4.2}
+         */
+        public TestCase upgradesFrom(Semver from, Semver to)
         {
-            this.upgrade.add(new TestVersions(versions.getLatest(from), versions.getLatest(CURRENT)));
+            List<TestVersions> upgrade = new ArrayList<>();
+            NavigableSet<Semver> vertices = sortedVertices(SUPPORTED_UPGRADE_PATHS);
+            for (Semver end : vertices.subSet(from, false, to, true))
+            {
+                // only include pairs that are allowed
+                if (SUPPORTED_UPGRADE_PATHS.hasEdge(from, end))
+                    upgrade.add(new TestVersions(versions.getLatest(from), Collections.singletonList(versions.getLatest(end))));
+            }
+            logger.info("Adding upgrades of\n{}", upgrade.stream().map(TestVersions::toString).collect(Collectors.joining("\n")));
+            this.upgrade.addAll(upgrade);
+            return this;
+        }
+
+        /**
+         * performs all supported upgrade paths that exist in between from and to that include the current version.
+         * This call is equivalent to calling {@code upgradesTo(from, CURRENT).upgradesFrom(CURRENT, to)}.
+         **/
+        public TestCase upgrades(Semver from, Semver to)
+        {
+            Assume.assumeTrue("Unable to do upgrades(" + from + ", " + to + "); does not contain CURRENT=" + CURRENT, contains(from, to, CURRENT));
+            if (from.compareTo(CURRENT) < 0)
+                upgradesTo(from, CURRENT);
+            if (CURRENT.compareTo(to) < 0)
+                upgradesFrom(CURRENT, to);
+            return this;
+        }
+
+        private static boolean contains(Semver from, Semver to, Semver target)
+        {
+            // target >= from && target <= to
+            return target.compareTo(from) >= 0 && target.compareTo(to) <= 0;
+        }
+
+        /** Will test this specific upgrade path **/
+        public TestCase singleUpgradeToCurrentFrom(Semver from)
+        {
+            if (!SUPPORTED_UPGRADE_PATHS.hasEdge(from, CURRENT))
+                throw new AssertionError("Upgrading from " + from + " to " + CURRENT + " isn't directly supported and must go through other versions first; supported paths: " + SUPPORTED_UPGRADE_PATHS.findPaths(from, CURRENT));
+            TestVersions tests = new TestVersions(this.versions.getLatest(from), Arrays.asList(this.versions.getLatest(CURRENT)));
+            logger.info("Adding upgrade of {}", tests);
+            this.upgrade.add(tests);
             return this;
         }
 
@@ -183,6 +271,12 @@
             return this;
         }
 
+        public TestCase runBeforeClusterUpgrade(RunOnCluster runBeforeClusterUpgrade)
+        {
+            this.runBeforeClusterUpgrade = runBeforeClusterUpgrade;
+            return this;
+        }
+
         public TestCase runAfterClusterUpgrade(RunOnCluster runAfterClusterUpgrade)
         {
             this.runAfterClusterUpgrade = runAfterClusterUpgrade;
@@ -211,6 +305,8 @@
                 throw new AssertionError();
             if (runBeforeNodeRestart == null)
                 runBeforeNodeRestart = (c, n) -> {};
+            if (runBeforeClusterUpgrade == null)
+                runBeforeClusterUpgrade = (c) -> {};
             if (runAfterClusterUpgrade == null)
                 runAfterClusterUpgrade = (c) -> {};
             if (runAfterNodeUpgrade == null)
@@ -219,26 +315,44 @@
                 for (int n = 1; n <= nodeCount; n++)
                     nodesToUpgrade.add(n);
 
+            int offset = 0;
             for (TestVersions upgrade : this.upgrade)
             {
-                logger.info("testing upgrade from {} to {}", upgrade.initial.version, upgrade.upgrade.version);
+                logger.info("testing upgrade from {} to {}", upgrade.initial.version, upgrade.upgradeVersions);
                 try (UpgradeableCluster cluster = init(UpgradeableCluster.create(nodeCount, upgrade.initial, configConsumer, builderConsumer)))
                 {
                     setup.run(cluster);
 
-                    for (int n : nodesToUpgrade)
+                    for (Version nextVersion : upgrade.upgrade)
                     {
-                        cluster.get(n).shutdown().get();
-                        cluster.get(n).setVersion(upgrade.upgrade);
-                        runBeforeNodeRestart.run(cluster, n);
-                        cluster.get(n).startup();
-                        runAfterNodeUpgrade.run(cluster, n);
-                    }
+                        try
+                        {
+                            runBeforeClusterUpgrade.run(cluster);
 
-                    runAfterClusterUpgrade.run(cluster);
+                            for (int n : nodesToUpgrade)
+                            {
+                                cluster.get(n).shutdown().get();
+                                triggerGC();
+                                cluster.get(n).setVersion(nextVersion);
+                                runBeforeNodeRestart.run(cluster, n);
+                                cluster.get(n).startup();
+                                runAfterNodeUpgrade.run(cluster, n);
+                            }
+
+                            runAfterClusterUpgrade.run(cluster);
+
+                            cluster.checkAndResetUncaughtExceptions();
+                        }
+                        catch (Throwable t)
+                        {
+                            throw new AssertionError(String.format("Error in test '%s' while upgrading to '%s'; successful upgrades %s", upgrade, nextVersion.version, this.upgrade.stream().limit(offset).collect(Collectors.toList())), t);
+                        }
+                    }
                 }
+                offset++;
             }
         }
+
         public TestCase nodesToUpgrade(int ... nodes)
         {
             Set<Integer> set = new HashSet<>(nodes.length);
@@ -260,10 +374,16 @@
         }
      }
 
+    private static void triggerGC()
+    {
+        System.runFinalization();
+        System.gc();
+    }
+
     protected TestCase allUpgrades(int nodes, int... toUpgrade)
     {
         return new TestCase().nodes(nodes)
-                             .upgradesFrom(v30)
+                             .upgradesToCurrentFrom(v30)
                              .nodesToUpgrade(toUpgrade);
     }
 
diff --git a/test/microbench/org/apache/cassandra/test/microbench/AbstractTypeByteSourceDecodingBench.java b/test/microbench/org/apache/cassandra/test/microbench/AbstractTypeByteSourceDecodingBench.java
new file mode 100644
index 0000000..427265e
--- /dev/null
+++ b/test/microbench/org/apache/cassandra/test/microbench/AbstractTypeByteSourceDecodingBench.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.test.microbench;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+
+import net.nicoulaj.compilecommand.annotations.Inline;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.BytesType;
+import org.apache.cassandra.db.marshal.DecimalType;
+import org.apache.cassandra.db.marshal.IntegerType;
+import org.apache.cassandra.db.marshal.TypeParser;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@Warmup(iterations = 5, time = 1)
+@Measurement(iterations = 5, time = 2)
+@Fork(value = 1,jvmArgsAppend = { "-Xmx4G", "-Xms4G", "-Djmh.executor=CUSTOM", "-Djmh.executor.class=org.apache.cassandra.test.microbench.FastThreadExecutor"})
+@Threads(1)
+@State(Scope.Benchmark)
+public class AbstractTypeByteSourceDecodingBench
+{
+
+    private static final ByteComparable.Version LATEST = ByteComparable.Version.OSS42;
+
+    private static final Map<AbstractType, BiFunction<Random, Integer, ByteSource.Peekable>> PEEKABLE_GENERATOR_BY_TYPE = new HashMap<>();
+    static
+    {
+        PEEKABLE_GENERATOR_BY_TYPE.put(UTF8Type.instance, (prng, length) ->
+        {
+            byte[] randomBytes = new byte[length];
+            prng.nextBytes(randomBytes);
+            return ByteSource.peekable(ByteSource.of(new String(randomBytes, StandardCharsets.UTF_8), LATEST));
+        });
+        PEEKABLE_GENERATOR_BY_TYPE.put(BytesType.instance, (prng, length) ->
+        {
+            byte[] randomBytes = new byte[length];
+            prng.nextBytes(randomBytes);
+            return ByteSource.peekable(ByteSource.of(randomBytes, LATEST));
+        });
+        PEEKABLE_GENERATOR_BY_TYPE.put(IntegerType.instance, (prng, length) ->
+        {
+            BigInteger randomVarint = BigInteger.valueOf(prng.nextLong());
+            for (int i = 1; i < length / 8; ++i)
+                randomVarint = randomVarint.multiply(BigInteger.valueOf(prng.nextLong()));
+            return ByteSource.peekable(IntegerType.instance.asComparableBytes(IntegerType.instance.decompose(randomVarint), LATEST));
+        });
+        PEEKABLE_GENERATOR_BY_TYPE.put(DecimalType.instance, (prng, length) ->
+        {
+            BigInteger randomMantissa = BigInteger.valueOf(prng.nextLong());
+            for (int i = 1; i < length / 8; ++i)
+                randomMantissa = randomMantissa.multiply(BigInteger.valueOf(prng.nextLong()));
+            int randomScale = prng.nextInt(Integer.MAX_VALUE >> 1) + Integer.MAX_VALUE >> 1;
+            BigDecimal randomDecimal = new BigDecimal(randomMantissa, randomScale);
+            return ByteSource.peekable(DecimalType.instance.asComparableBytes(DecimalType.instance.decompose(randomDecimal), LATEST));
+        });
+    }
+
+    private Random prng = new Random();
+
+    @Param({"32", "128", "512"})
+    private int length;
+
+    @Param({"UTF8Type", "BytesType", "IntegerType", "DecimalType"})
+    private String abstractTypeName;
+
+    private AbstractType abstractType;
+    private BiFunction<Random, Integer, ByteSource.Peekable> peekableGenerator;
+
+    @Setup(Level.Trial)
+    public void setup()
+    {
+        abstractType = TypeParser.parse(abstractTypeName);
+        peekableGenerator = PEEKABLE_GENERATOR_BY_TYPE.get(abstractType);
+    }
+
+    @Inline
+    private ByteSource.Peekable randomPeekableBytes()
+    {
+        return peekableGenerator.apply(prng, length);
+    }
+
+    @Benchmark
+    public int baseline()
+    {
+        // Getting the source is not enough as its content is produced on next() calls.
+        ByteSource.Peekable source = randomPeekableBytes();
+        int count = 0;
+        while (source.next() != ByteSource.END_OF_STREAM)
+            ++count;
+        return count;
+    }
+
+    @Benchmark
+    public ByteBuffer fromComparableBytes()
+    {
+        ByteSource.Peekable peekableBytes = randomPeekableBytes();
+        return abstractType.fromComparableBytes(peekableBytes, ByteComparable.Version.OSS42);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 3279490..e2ef487 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -286,6 +286,7 @@
         SchemaTestUtil.announceNewKeyspace(AuthKeyspace.metadata());
         DatabaseDescriptor.getRoleManager().setup();
         DatabaseDescriptor.getAuthenticator().setup();
+        DatabaseDescriptor.getInternodeAuthenticator().setupInternode();
         DatabaseDescriptor.getAuthorizer().setup();
         DatabaseDescriptor.getNetworkAuthorizer().setup();
         Schema.instance.registerListener(new AuthSchemaChangeListener());
diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java
index 0459cb3..b0b5964 100644
--- a/test/unit/org/apache/cassandra/Util.java
+++ b/test/unit/org/apache/cassandra/Util.java
@@ -23,6 +23,7 @@
 import java.io.EOFException;
 import java.io.IOError;
 import java.io.IOException;
+import java.math.BigInteger;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -859,6 +860,199 @@
         assertEquals(expectedSSTableCount, fileCount);
     }
 
+    public static ByteBuffer generateMurmurCollision(ByteBuffer original, byte... bytesToAdd)
+    {
+        // Round size up to 16, and add another 16 bytes
+        ByteBuffer collision = ByteBuffer.allocate((original.remaining() + bytesToAdd.length + 31) & -16);
+        collision.put(original);    // we can use this as a copy of original with 0s appended at the end
+
+        original.flip();
+
+        long c1 = 0x87c37b91114253d5L;
+        long c2 = 0x4cf5ad432745937fL;
+
+        long h1 = 0;
+        long h2 = 0;
+
+        // Get hash of original
+        int index = 0;
+        final int length = original.limit();
+        while (index <= length - 16)
+        {
+            long k1 = Long.reverseBytes(collision.getLong(index + 0));
+            long k2 = Long.reverseBytes(collision.getLong(index + 8));
+
+            // 16 bytes
+            k1 *= c1;
+            k1 = rotl64(k1, 31);
+            k1 *= c2;
+            h1 ^= k1;
+            h1 = rotl64(h1, 27);
+            h1 += h2;
+            h1 = h1 * 5 + 0x52dce729;
+            k2 *= c2;
+            k2 = rotl64(k2, 33);
+            k2 *= c1;
+            h2 ^= k2;
+            h2 = rotl64(h2, 31);
+            h2 += h1;
+            h2 = h2 * 5 + 0x38495ab5;
+
+            index += 16;
+        }
+
+        long oh1 = h1;
+        long oh2 = h2;
+
+        // Process final unfilled chunk, but only adjust the original hash value
+        if (index < length)
+        {
+            long k1 = Long.reverseBytes(collision.getLong(index + 0));
+            long k2 = Long.reverseBytes(collision.getLong(index + 8));
+
+            // 16 bytes
+            k1 *= c1;
+            k1 = rotl64(k1, 31);
+            k1 *= c2;
+            oh1 ^= k1;
+
+            k2 *= c2;
+            k2 = rotl64(k2, 33);
+            k2 *= c1;
+            oh2 ^= k2;
+        }
+
+        // These are the hashes the original would provide, before final mixing
+        oh1 ^= original.capacity();
+        oh2 ^= original.capacity();
+
+        // Fill in the remaining bytes before the last 16 and get their hash
+        collision.put(bytesToAdd);
+        while ((collision.position() & 0x0f) != 0)
+            collision.put((byte) 0);
+
+        while (index < collision.position())
+        {
+            long k1 = Long.reverseBytes(collision.getLong(index + 0));
+            long k2 = Long.reverseBytes(collision.getLong(index + 8));
+
+            // 16 bytes
+            k1 *= c1;
+            k1 = rotl64(k1, 31);
+            k1 *= c2;
+            h1 ^= k1;
+            h1 = rotl64(h1, 27);
+            h1 += h2;
+            h1 = h1 * 5 + 0x52dce729;
+            k2 *= c2;
+            k2 = rotl64(k2, 33);
+            k2 *= c1;
+            h2 ^= k2;
+            h2 = rotl64(h2, 31);
+            h2 += h1;
+            h2 = h2 * 5 + 0x38495ab5;
+
+            index += 16;
+        }
+
+        // Working backwards, we must get this hash pair
+        long th1 = h1;
+        long th2 = h2;
+
+        // adjust ohx with length
+        h1 = oh1 ^ collision.capacity();
+        h2 = oh2 ^ collision.capacity();
+
+        // Get modulo-long inverses of the multipliers used in the computation
+        long i5i = inverse(5L);
+        long c1i = inverse(c1);
+        long c2i = inverse(c2);
+
+        // revert one step
+        h2 -= 0x38495ab5;
+        h2 *= i5i;
+        h2 -= h1;
+        h2 = rotl64(h2, 33);
+
+        h1 -= 0x52dce729;
+        h1 *= i5i;
+        h1 -= th2;  // use h2 before it's adjusted with k2
+        h1 = rotl64(h1, 37);
+
+        // extract the required modifiers and apply the inverse of their transformation
+        long k1 = h1 ^ th1;
+        k1 = c2i * k1;
+        k1 = rotl64(k1, 33);
+        k1 = c1i * k1;
+
+        long k2 = h2 ^ th2;
+        k2 = c1i * k2;
+        k2 = rotl64(k2, 31);
+        k2 = c2i * k2;
+
+        collision.putLong(Long.reverseBytes(k1));
+        collision.putLong(Long.reverseBytes(k2));
+        collision.flip();
+
+        return collision;
+    }
+
+    // Assumes a and b are positive
+    private static BigInteger[] xgcd(BigInteger a, BigInteger b) {
+        BigInteger x = a, y = b;
+        BigInteger[] qrem;
+        BigInteger[] result = new BigInteger[3];
+        BigInteger x0 = BigInteger.ONE, x1 = BigInteger.ZERO;
+        BigInteger y0 = BigInteger.ZERO, y1 = BigInteger.ONE;
+        while (true)
+        {
+            qrem = x.divideAndRemainder(y);
+            x = qrem[1];
+            x0 = x0.subtract(y0.multiply(qrem[0]));
+            x1 = x1.subtract(y1.multiply(qrem[0]));
+            if (x.equals(BigInteger.ZERO))
+            {
+                result[0] = y;
+                result[1] = y0;
+                result[2] = y1;
+                return result;
+            }
+
+            qrem = y.divideAndRemainder(x);
+            y = qrem[1];
+            y0 = y0.subtract(x0.multiply(qrem[0]));
+            y1 = y1.subtract(x1.multiply(qrem[0]));
+            if (y.equals(BigInteger.ZERO))
+            {
+                result[0] = x;
+                result[1] = x0;
+                result[2] = x1;
+                return result;
+            }
+        }
+    }
+
+    /**
+ * Find a multiplicative inverse for the given multiplier for long, i.e.
+     * such that x * inverse(x) = 1 where * is long multiplication.
+     * In other words, such an integer that x * inverse(x) == 1 (mod 2^64).
+     */
+    public static long inverse(long multiplier)
+    {
+        final BigInteger modulus = BigInteger.ONE.shiftLeft(64);
+        // Add the modulus to the multiplier to avoid problems with negatives (a + m == a (mod m))
+        BigInteger[] gcds = xgcd(BigInteger.valueOf(multiplier).add(modulus), modulus);
+        // xgcd gives g, a and b, such that ax + bm = g
+        // ie, ax = g (mod m). Return a
+        assert gcds[0].equals(BigInteger.ONE) : "Even number " + multiplier + " has no long inverse";
+        return gcds[1].longValueExact();
+    }
+
+    public static long rotl64(long v, int n)
+    {
+        return ((v << n) | (v >>> (64 - n)));
+    }
+
     /**
      * Disable bloom filter on all sstables of given table
      */
diff --git a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
index a76c24a..0ff2124 100644
--- a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
+++ b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java
@@ -125,7 +125,7 @@
                                                          EncryptionOptions.TlsEncryptionPolicy expected)
         {
             return new ServerEncryptionOptionsTestCase(new EncryptionOptions.ServerEncryptionOptions(new ParameterizedClass("org.apache.cassandra.security.DefaultSslContextFactory",
-                                                                                                                            new HashMap<>()), keystorePath, "dummypass", "dummytruststore", "dummypass",
+                                                                                                                            new HashMap<>()), keystorePath, "dummypass", keystorePath, "dummypass", "dummytruststore", "dummypass",
                                                                                                Collections.emptyList(), null, null, null, "JKS", false, false, optional, internodeEncryption, false)
                                                        .applyConfig(),
                                                  expected,
diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
index eae939b..7c2eebb 100644
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -1322,6 +1322,10 @@
         return executeFormattedQuery(formatViewQuery(KEYSPACE, query), values);
     }
 
+    /**
+     * Executes the provided query using the {@link ClientState#forInternalCalls()} as the expected ClientState. Note:
+     * this means permissions checking will not apply and queries will proceed regardless of role or guardrails.
+     */
     protected UntypedResultSet executeFormattedQuery(String query, Object... values) throws Throwable
     {
         UntypedResultSet rs;
@@ -1738,8 +1742,15 @@
         assertInvalidThrowMessage(Optional.empty(), errorMessage, exception, query, values);
     }
 
-    // if a protocol version > Integer.MIN_VALUE is supplied, executes
-    // the query via the java driver, mimicking a real client.
+    /**
+     * Asserts that the query provided throws the exceptions provided.
+     *
+     * NOTE: This method uses {@link ClientState#forInternalCalls()} which sets the {@link ClientState#isInternal} value
+     * to true, nullifying any system keyspace or other permissions checking for tables.
+     *
+     * If a protocol version > Integer.MIN_VALUE is supplied, executes
+     * the query via the java driver, mimicking a real client.
+     */
     protected void assertInvalidThrowMessage(Optional<ProtocolVersion> protocolVersion,
                                              String errorMessage,
                                              Class<? extends Throwable> exception,
diff --git a/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java b/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
index ee4dd35..4eadb95 100644
--- a/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
+++ b/test/unit/org/apache/cassandra/cql3/selection/SelectorSerializationTest.java
@@ -60,8 +60,9 @@
         checkSerialization(table.getColumn(new ColumnIdentifier("c1", false)), table);
 
         // Test WritetimeOrTTLSelector serialization
-        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), true), table);
-        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), false), table);
+        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), Selectable.WritetimeOrTTL.Kind.WRITE_TIME), table);
+        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), Selectable.WritetimeOrTTL.Kind.TTL), table);
+        checkSerialization(new Selectable.WritetimeOrTTL(table.getColumn(new ColumnIdentifier("v", false)), Selectable.WritetimeOrTTL.Kind.MAX_WRITE_TIME), table);
 
         // Test ListSelector serialization
         checkSerialization(new Selectable.WithList(asList(table.getColumn(new ColumnIdentifier("v", false)),
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
index 13090a6..7c6cd8f 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/TimestampTest.java
@@ -17,11 +17,16 @@
  */
 package org.apache.cassandra.cql3.validation.entities;
 
+import java.util.Arrays;
+import java.util.List;
+
 import org.junit.Test;
 
 import org.junit.Assert;
 import org.apache.cassandra.cql3.CQLTester;
+import org.apache.cassandra.utils.Pair;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -97,6 +102,84 @@
                    row(1, null, null));
     }
 
+    private void setupSchemaForMaxTimestamp()
+    {
+        String myType = createType("CREATE TYPE %s (a int, b int)");
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, a text, " +
+                    "l list<int>, fl frozen<list<int>>," +
+                    "s set<int>, fs frozen<set<int>>," +
+                    "m map<int, text>, fm frozen<map<int, text>>," +
+                    "t " + myType + ", ft frozen<" + myType + ">)");
+    }
+
+    @Test
+    public void testCallMaxTimestampOnEmptyCollectionReturnsNull() throws Throwable
+    {
+        setupSchemaForMaxTimestamp();
+
+        execute("INSERT INTO %s (k) VALUES (1)");
+        Object[][] res = getRows(execute("SELECT maxwritetime(a), maxwritetime(l), maxwritetime(fl)," +
+                                         "maxwritetime(s), maxwritetime(fs), maxwritetime(m), maxwritetime(fm)," +
+                                         "maxwritetime(t), maxwritetime(ft) FROM %s WHERE k=1"));
+
+        assertEquals(1, res.length);
+        for (Object v : res[0])
+        {
+            assertNull("All the multi-cell data are empty (we did not insert), calling maxwritetime should return null",
+                       v);
+        }
+    }
+
+    @Test
+    public void testMaxTimestamp() throws Throwable
+    {
+        setupSchemaForMaxTimestamp();
+
+        execute("INSERT INTO %s (k, a, l, fl, s, fs, m, fm, t, ft) VALUES " +
+                "(1, 'test', [1], [2], {1}, {2}, {1 : 'a'}, {2 : 'b'}, {a : 1, b : 1 }, {a : 2, b : 2}) USING TIMESTAMP 1");
+
+        // enumerate through all multi-cell types and make sure maxwritetime reflects the expected result
+        testMaxTimestampWithColumnUpdate(Arrays.asList(
+           Pair.create(1, "UPDATE %s USING TIMESTAMP 10 SET l = l + [10] WHERE k = 1"),
+           Pair.create(3, "UPDATE %s USING TIMESTAMP 11 SET s = s + {10} WHERE k = 1"),
+           Pair.create(5, "UPDATE %s USING TIMESTAMP 12 SET m = m + {10 : 'c'} WHERE k = 1"),
+           Pair.create(7, "UPDATE %s USING TIMESTAMP 13 SET t.a = 10 WHERE k = 1")
+        ));
+    }
+
+    private void testMaxTimestampWithColumnUpdate(List<Pair<Integer, String>> updateStatements) throws Throwable
+    {
+        for (Pair<Integer, String> update : updateStatements)
+        {
+            int fieldPos = update.left();
+            String statement = update.right();
+
+            // run the update statement and update the timestamp of the column
+            execute(statement);
+
+            Object[][] res = getRows(execute("SELECT maxwritetime(a), maxwritetime(l), maxwritetime(fl)," +
+                                             "maxwritetime(s), maxwritetime(fs), maxwritetime(m), maxwritetime(fm)," +
+                                             "maxwritetime(t), maxwritetime(ft) FROM %s WHERE k=1"));
+            Assert.assertEquals(1, res.length);
+            Assert.assertEquals("maxwritetime should work on both single cell and complex columns",
+                                9, res[0].length);
+            for (Object ts : res[0])
+            {
+                assertTrue(ts instanceof Long); // all the result fields are timestamps
+            }
+
+            long updatedTs = (long) res[0][fieldPos]; // maxwritetime the updated column
+
+            for (int i = 0; i < res[0].length; i++)
+            {
+                long ts = (long) res[0][i];
+                if (i != fieldPos)
+                    assertTrue("The updated column should have a large maxwritetime since it is updated later",
+                               ts < updatedTs);
+            }
+        }
+    }
+
     /**
      * Migrated from cql_tests.py:TestCQL.invalid_custom_timestamp_test()
      */
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
index 9f53db4..f325655 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/TupleTypeTest.java
@@ -33,6 +33,8 @@
 
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.UntypedResultSet;
+import org.apache.cassandra.db.SchemaCQLHelper;
+import org.apache.cassandra.db.marshal.ByteBufferAccessor;
 import org.apache.cassandra.db.marshal.TupleType;
 import org.apache.cassandra.utils.AbstractTypeGenerators.TypeSupport;
 import org.quicktheories.core.Gen;
@@ -266,7 +268,7 @@
             for (ByteBuffer value : testcase.uniqueRows)
             {
                 map.put(value, count);
-                ByteBuffer[] tupleBuffers = tupleType.split(value);
+                ByteBuffer[] tupleBuffers = tupleType.split(ByteBufferAccessor.instance, value);
 
                 // use cast to avoid warning
                 execute("INSERT INTO %s (id, value) VALUES (?, ?)", tuple((Object[]) tupleBuffers), count);
@@ -304,7 +306,7 @@
             for (ByteBuffer value : testcase.uniqueRows)
             {
                 map.put(value, count);
-                ByteBuffer[] tupleBuffers = tupleType.split(value);
+                ByteBuffer[] tupleBuffers = tupleType.split(ByteBufferAccessor.instance, value);
 
                 // use cast to avoid warning
                 execute("INSERT INTO %s (pk, ck, value) VALUES (?, ?, ?)", 1, tuple((Object[]) tupleBuffers), count);
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
index 0b05e8f..1520b4c 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/UserTypesTest.java
@@ -200,6 +200,56 @@
     }
 
     @Test
+    public void testNullsInIntUDT() throws Throwable
+    {
+        String myType = KEYSPACE + '.' + createType("CREATE TYPE %s (a int)");
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b frozen<" + myType + ">)");
+        execute("INSERT INTO %s (a, b) VALUES (1, ?)", userType("a", 1));
+
+        assertRows(execute("SELECT b.a FROM %s"), row(1));
+
+        flush();
+
+        schemaChange("ALTER TYPE " + myType + " ADD b int");
+        execute("INSERT INTO %s (a, b) VALUES (2, {a: 2, b: 2})");
+        execute("INSERT INTO %s (a, b) VALUES (3, {b: 3})");
+        execute("INSERT INTO %s (a, b) VALUES (4, {a: null, b: 4})");
+
+        beforeAndAfterFlush(() ->
+                            assertRows(execute("SELECT b.a, b.b FROM %s"),
+                                       row(1, null),
+                                       row(2, 2),
+                                       row(null, 3),
+                                       row(null, 4))
+        );
+    }
+
+    @Test
+    public void testNullsInTextUDT() throws Throwable
+    {
+        String myType = KEYSPACE + '.' + createType("CREATE TYPE %s (a text)");
+        createTable("CREATE TABLE %s (a int PRIMARY KEY, b frozen<" + myType + ">)");
+        execute("INSERT INTO %s (a, b) VALUES (1, {a: ''})");
+
+        assertRows(execute("SELECT b.a FROM %s"), row(""));
+
+        flush();
+
+        schemaChange("ALTER TYPE " + myType + " ADD b text");
+        execute("INSERT INTO %s (a, b) VALUES (2, {a: '', b: ''})");
+        execute("INSERT INTO %s (a, b) VALUES (3, {b: ''})");
+        execute("INSERT INTO %s (a, b) VALUES (4, {a: null, b: ''})");
+
+        beforeAndAfterFlush(() ->
+                            assertRows(execute("SELECT b.a, b.b FROM %s"),
+                                       row("", null),
+                                       row("", ""),
+                                       row(null, ""),
+                                       row(null, ""))
+        );
+    }
+
+    @Test
     public void testAlterNonFrozenUDT() throws Throwable
     {
         String myType = KEYSPACE + '.' + createType("CREATE TYPE %s (a int, b text)");
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java
index cc6c663..16bab23 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/WritetimeOrTTLTest.java
@@ -207,6 +207,10 @@
         String writetimeQuery = String.format("SELECT WRITETIME(%s) FROM %%s %s", column, where);
         assertRows(writetimeQuery, row(timestamp));
 
+        // Verify max write time
+        String maxwritetimeQuery = String.format("SELECT MAXWRITETIME(%s) FROM %%s %s", column, where);
+        assertRows(maxwritetimeQuery, row(timestamp));
+
         // Verify ttl
         UntypedResultSet rs = execute(String.format("SELECT TTL(%s) FROM %%s %s", column, where));
         assertRowCount(rs, 1);
@@ -242,9 +246,12 @@
 
     private void assertInvalidPrimaryKeySelection(String column) throws Throwable
     {
-        assertInvalidThrowMessage("Cannot use selection function writeTime on PRIMARY KEY part " + column,
+        assertInvalidThrowMessage("Cannot use selection function writetime on PRIMARY KEY part " + column,
                                   InvalidRequestException.class,
                                   String.format("SELECT WRITETIME(%s) FROM %%s", column));
+        assertInvalidThrowMessage("Cannot use selection function maxwritetime on PRIMARY KEY part " + column,
+                                  InvalidRequestException.class,
+                                  String.format("SELECT MAXWRITETIME(%s) FROM %%s", column));
         assertInvalidThrowMessage("Cannot use selection function ttl on PRIMARY KEY part " + column,
                                   InvalidRequestException.class,
                                   String.format("SELECT TTL(%s) FROM %%s", column));
@@ -254,9 +261,10 @@
     {
         String message = format("Cannot use selection function %%s on non-frozen %s %s",
                                 isCollection ? "collection" : "UDT", column);
-        assertInvalidThrowMessage(format(message, "writeTime"),
+        assertInvalidThrowMessage(format(message, "writetime"),
                                   InvalidRequestException.class,
                                   String.format("SELECT WRITETIME(%s) FROM %%s", column));
+        execute(format("SELECT MAXWRITETIME(%s) FROM %%s", column));
         assertInvalidThrowMessage(format(message, "ttl"),
                                   InvalidRequestException.class,
                                   String.format("SELECT TTL(%s) FROM %%s", column));
diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
index 6d8bd0f..6268d3b 100644
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java
@@ -170,7 +170,7 @@
     }
 
     @Test
-    public void testDeleteStandardRowSticksAfterFlush() throws Throwable
+    public void testDeleteStandardRowSticksAfterFlush()
     {
         // test to make sure flushing after a delete doesn't resurrect delted cols.
         String keyspaceName = KEYSPACE1;
@@ -228,7 +228,7 @@
     }
 
     @Test
-    public void testClearEphemeralSnapshots() throws Throwable
+    public void testClearEphemeralSnapshots()
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_INDEX1);
 
diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
index 252c630..8e47157 100644
--- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java
+++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -97,6 +98,7 @@
     public static final String TABLE_NAME = "FakeTable";
     public static final String SNAPSHOT1 = "snapshot1";
     public static final String SNAPSHOT2 = "snapshot2";
+    public static final String SNAPSHOT3 = "snapshot3";
 
     public static final String LEGACY_SNAPSHOT_NAME = "42";
     private static File tempDataDir;
@@ -105,7 +107,7 @@
     private static Set<TableMetadata> CFM;
     private static Map<String, List<File>> sstablesByTableName;
 
-    @Parameterized.Parameter(0)
+    @Parameterized.Parameter
     public SSTableId.Builder<? extends SSTableId> idBuilder;
 
     @Parameterized.Parameter(1)
@@ -151,7 +153,7 @@
     @AfterClass
     public static void afterClass()
     {
-        FileUtils.deleteRecursive(tempDataDir);
+        tempDataDir.deleteRecursive();
     }
 
     private static DataDirectory[] toDataDirectories(File location)
@@ -159,7 +161,7 @@
         return new DataDirectory[] { new DataDirectory(location) };
     }
 
-    private void createTestFiles() throws IOException
+    private void createTestFiles()
     {
         for (TableMetadata cfm : CFM)
         {
@@ -181,25 +183,27 @@
         }
     }
 
-    class FakeSnapshot {
+    static class FakeSnapshot {
         final TableMetadata table;
         final String tag;
         final File snapshotDir;
         final SnapshotManifest manifest;
+        final boolean ephemeral;
 
-        FakeSnapshot(TableMetadata table, String tag, File snapshotDir, SnapshotManifest manifest)
+        FakeSnapshot(TableMetadata table, String tag, File snapshotDir, SnapshotManifest manifest, boolean ephemeral)
         {
             this.table = table;
             this.tag = tag;
             this.snapshotDir = snapshotDir;
             this.manifest = manifest;
+            this.ephemeral = ephemeral;
         }
 
         public TableSnapshot asTableSnapshot()
         {
             Instant createdAt = manifest == null ? null : manifest.createdAt;
             Instant expiresAt = manifest == null ? null : manifest.expiresAt;
-            return new TableSnapshot(table.keyspace, table.name, table.id.asUUID(), tag, createdAt, expiresAt, Collections.singleton(snapshotDir));
+            return new TableSnapshot(table.keyspace, table.name, table.id.asUUID(), tag, createdAt, expiresAt, Collections.singleton(snapshotDir), ephemeral);
         }
     }
 
@@ -211,7 +215,7 @@
                             .build();
     }
 
-    public FakeSnapshot createFakeSnapshot(TableMetadata table, String tag, boolean createManifest) throws IOException
+    public FakeSnapshot createFakeSnapshot(TableMetadata table, String tag, boolean createManifest, boolean ephemeral) throws IOException
     {
         File tableDir = cfDir(table);
         tableDir.tryCreateDirectories();
@@ -225,11 +229,15 @@
         if (createManifest)
         {
             File manifestFile = Directories.getSnapshotManifestFile(snapshotDir);
-            manifest = new SnapshotManifest(Collections.singletonList(sstableDesc.filenameFor(Component.DATA)), new DurationSpec.IntSecondsBound("1m"), now());
+            manifest = new SnapshotManifest(Collections.singletonList(sstableDesc.filenameFor(Component.DATA)), new DurationSpec.IntSecondsBound("1m"), now(), ephemeral);
             manifest.serializeToJsonFile(manifestFile);
         }
+        else if (ephemeral)
+        {
+            Files.createFile(snapshotDir.toPath().resolve("ephemeral.snapshot"));
+        }
 
-        return new FakeSnapshot(table, tag, snapshotDir, manifest);
+        return new FakeSnapshot(table, tag, snapshotDir, manifest, ephemeral);
     }
 
     private List<File> createFakeSSTable(File dir, String cf, int gen)
@@ -269,7 +277,7 @@
     }
 
     @Test
-    public void testStandardDirs() throws IOException
+    public void testStandardDirs()
     {
         for (TableMetadata cfm : CFM)
         {
@@ -296,22 +304,27 @@
         assertThat(directories.listSnapshots()).isEmpty();
 
         // Create snapshot with and without manifest
-        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true);
-        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false);
+        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true, false);
+        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false, false);
+        // ephemeral without manifest
+        FakeSnapshot snapshot3 = createFakeSnapshot(fakeTable, SNAPSHOT3, false, true);
 
         // Both snapshots should be present
         Map<String, TableSnapshot> snapshots = directories.listSnapshots();
-        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2));
+        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2, SNAPSHOT3));
         assertThat(snapshots.get(SNAPSHOT1)).isEqualTo(snapshot1.asTableSnapshot());
         assertThat(snapshots.get(SNAPSHOT2)).isEqualTo(snapshot2.asTableSnapshot());
+        assertThat(snapshots.get(SNAPSHOT3)).isEqualTo(snapshot3.asTableSnapshot());
 
         // Now remove snapshot1
-        FileUtils.deleteRecursive(snapshot1.snapshotDir);
+        snapshot1.snapshotDir.deleteRecursive();
 
-        // Only snapshot 2 should be present
+        // Only snapshots 2 and 3 should be present
         snapshots = directories.listSnapshots();
-        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2));
+        assertThat(snapshots.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2, SNAPSHOT3));
         assertThat(snapshots.get(SNAPSHOT2)).isEqualTo(snapshot2.asTableSnapshot());
+        assertThat(snapshots.get(SNAPSHOT3)).isEqualTo(snapshot3.asTableSnapshot());
+        assertThat(snapshots.get(SNAPSHOT3).isEphemeral()).isTrue();
     }
 
     @Test
@@ -322,21 +335,23 @@
         assertThat(directories.listSnapshotDirsByTag()).isEmpty();
 
         // Create snapshot with and without manifest
-        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true);
-        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false);
+        FakeSnapshot snapshot1 = createFakeSnapshot(fakeTable, SNAPSHOT1, true, false);
+        FakeSnapshot snapshot2 = createFakeSnapshot(fakeTable, SNAPSHOT2, false, false);
+        FakeSnapshot snapshot3 = createFakeSnapshot(fakeTable, SNAPSHOT3, false, true);
 
         // Both snapshots should be present
         Map<String, Set<File>> snapshotDirs = directories.listSnapshotDirsByTag();
-        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2));
+        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT1, SNAPSHOT2, SNAPSHOT3));
         assertThat(snapshotDirs.get(SNAPSHOT1)).allMatch(snapshotDir -> snapshotDir.equals(snapshot1.snapshotDir));
         assertThat(snapshotDirs.get(SNAPSHOT2)).allMatch(snapshotDir -> snapshotDir.equals(snapshot2.snapshotDir));
+        assertThat(snapshotDirs.get(SNAPSHOT3)).allMatch(snapshotDir -> snapshotDir.equals(snapshot3.snapshotDir));
 
         // Now remove snapshot1
-        FileUtils.deleteRecursive(snapshot1.snapshotDir);
+        snapshot1.snapshotDir.deleteRecursive();
 
-        // Only snapshot 2 should be present
+        // Only snapshots 2 and 3 should be present
         snapshotDirs = directories.listSnapshotDirsByTag();
-        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2));
+        assertThat(snapshotDirs.keySet()).isEqualTo(Sets.newHashSet(SNAPSHOT2, SNAPSHOT3));
     }
 
     @Test
@@ -353,7 +368,7 @@
 
             File manifestFile = directories.getSnapshotManifestFile(tag);
 
-            SnapshotManifest manifest = new SnapshotManifest(files, new DurationSpec.IntSecondsBound("1m"), now());
+            SnapshotManifest manifest = new SnapshotManifest(files, new DurationSpec.IntSecondsBound("1m"), now(), false);
             manifest.serializeToJsonFile(manifestFile);
 
             Set<File> dirs = new HashSet<>();
@@ -488,7 +503,7 @@
     }
 
     @Test
-    public void testTemporaryFile() throws IOException
+    public void testTemporaryFile()
     {
         for (TableMetadata cfm : CFM)
         {
@@ -552,11 +567,10 @@
             final Directories directories = new Directories(cfm, toDataDirectories(tempDataDir));
             assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables());
             final String n = Long.toString(nanoTime());
-            Callable<File> directoryGetter = new Callable<File>() {
-                public File call() throws Exception {
-                    Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, sstableId(1), SSTableFormat.Type.BIG);
-                    return Directories.getSnapshotDirectory(desc, n);
-                }
+            Callable<File> directoryGetter = () ->
+            {
+                Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, sstableId(1), SSTableFormat.Type.BIG);
+                return Directories.getSnapshotDirectory(desc, n);
             };
             List<Future<File>> invoked = Executors.newFixedThreadPool(2).invokeAll(Arrays.asList(directoryGetter, directoryGetter));
             for(Future<File> fut:invoked) {
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailAlterTableTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAlterTableTest.java
new file mode 100644
index 0000000..1aa7095
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailAlterTableTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
+/**
+ * Tests the guardrail for disabling user access to the ALTER TABLE statement, {@link Guardrails#alterTableEnabled}.
+ *
+ * NOTE: This test class depends on {@link #currentTable()} method for setup, cleanup, and execution of tests. You'll
+ * need to refactor this if you add tests that make changes to the current table as the test teardown will no longer match
+ * setup.
+ */
+public class GuardrailAlterTableTest extends GuardrailTester
+{
+    public GuardrailAlterTableTest()
+    {
+        super(Guardrails.alterTableEnabled);
+    }
+
+    @Before
+    public void setupTest() throws Throwable
+    {
+        createTable("CREATE TABLE IF NOT EXISTS %s (k INT, c INT, v TEXT, PRIMARY KEY(k, c))");
+    }
+
+    @After
+    public void afterTest() throws Throwable
+    {
+        dropTable("DROP TABLE %s");
+        setGuardrail(true);
+    }
+
+    private void setGuardrail(boolean alterTableEnabled)
+    {
+        guardrails().setAlterTableEnabled(alterTableEnabled);
+    }
+
+    /**
+     * Confirm that ALTER TABLE queries either work (guardrail enabled) or fail (guardrail disabled) appropriately
+     * @throws Throwable
+     */
+    @Test
+    public void testGuardrailEnabledAndDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("ALTER TABLE %s ADD test_one text;", "changing columns");
+
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s ADD test_two text;");
+
+        setGuardrail(false);
+        assertFails("ALTER TABLE %s ADD test_three text;", "changing columns");
+    }
+
+    /**
+     * Confirm the guardrail appropriately catches the ALTER DROP case on a column
+     * @throws Throwable
+     */
+    @Test
+    public void testAppliesToAlterDropColumn() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s ADD test_one text;");
+
+        setGuardrail(false);
+        assertFails("ALTER TABLE %s DROP test_one", "changing columns");
+
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s DROP test_one");
+    }
+
+    /**
+     * Confirm the guardrail appropriately catches the ALTER RENAME case on a column
+     * @throws Throwable
+     */
+    @Test
+    public void testAppliesToAlterRenameColumn() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("ALTER TABLE %s RENAME c TO renamed_c", "changing columns");
+
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s RENAME c TO renamed_c");
+    }
+
+    /**
+     * Confirm we can always alter properties via the options map regardless of guardrail state
+     * @throws Throwable
+     */
+    @Test
+    public void testAlterViaMapAlwaysWorks() throws Throwable
+    {
+        setGuardrail(false);
+        assertValid("ALTER TABLE %s WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32 };");
+
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s WITH compression = { 'class' : 'SnappyCompressor', 'chunk_length_in_kb' : 32 };");
+    }
+
+    /**
+     * Confirm the other form of ALTER TABLE property map changing always works regardless of guardrail state
+     * @throws Throwable
+     */
+    @Test
+    public void testAlterOptionsAlwaysWorks() throws Throwable
+    {
+        setGuardrail(true);
+        assertValid("ALTER TABLE %s WITH GC_GRACE_SECONDS = 456; ");
+
+        setGuardrail(false);
+        assertValid("ALTER TABLE %s WITH GC_GRACE_SECONDS = 123; ");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropKeyspaceTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropKeyspaceTest.java
new file mode 100644
index 0000000..de44725
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailDropKeyspaceTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.After;
+import org.junit.Test;
+
+public class GuardrailDropKeyspaceTest extends GuardrailTester
+{
+    private String keyspaceQuery = "CREATE KEYSPACE dkdt WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1}";
+
+    private void setGuardrail(boolean enabled)
+    {
+        Guardrails.instance.setDropKeyspaceEnabled(enabled);
+    }
+
+    public GuardrailDropKeyspaceTest()
+    {
+        super(Guardrails.dropKeyspaceEnabled);
+    }
+
+    @After
+    public void afterTest() throws Throwable
+    {
+        setGuardrail(true);
+        execute("DROP KEYSPACE IF EXISTS dkdt");
+    }
+
+    @Test
+    public void testCanDropWhileFeatureEnabled() throws Throwable
+    {
+        setGuardrail(true);
+        createKeyspace(keyspaceQuery);
+        execute("DROP KEYSPACE dkdt");
+    }
+
+    @Test
+    public void testCannotDropWhileFeatureDisabled() throws Throwable
+    {
+        setGuardrail(false);
+        createKeyspace(keyspaceQuery);
+        assertFails("DROP KEYSPACE dkdt", "DROP KEYSPACE functionality is not allowed");
+    }
+
+    @Test
+    public void testIfExistsDoesNotBypassCheck() throws Throwable
+    {
+        setGuardrail(false);
+        createKeyspace(keyspaceQuery);
+        assertFails("DROP KEYSPACE IF EXISTS dkdt", "DROP KEYSPACE functionality is not allowed");
+    }
+
+    @Test
+    public void testToggle() throws Throwable
+    {
+        setGuardrail(false);
+        createKeyspace(keyspaceQuery);
+        assertFails("DROP KEYSPACE IF EXISTS dkdt", "DROP KEYSPACE functionality is not allowed");
+
+        setGuardrail(true);
+        execute("DROP KEYSPACE dkdt");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailMaximumReplicationFactorTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMaximumReplicationFactorTest.java
new file mode 100644
index 0000000..865ac23
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMaximumReplicationFactorTest.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import org.junit.After;
+import org.junit.Test;
+
+import org.apache.cassandra.ServerTestUtils;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.Keyspace;
+import org.apache.cassandra.locator.AbstractEndpointSnitch;
+import org.apache.cassandra.locator.IEndpointSnitch;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.locator.Replica;
+import org.apache.cassandra.service.ClientWarn;
+import org.apache.cassandra.service.StorageService;
+import org.assertj.core.api.Assertions;
+
+import static java.lang.String.format;
+
+public class GuardrailMaximumReplicationFactorTest extends ThresholdTester
+{
+    private static final int MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD = 2;
+    private static final int MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD = 4;
+    private static final int DISABLED_GUARDRAIL = -1;
+
+    public GuardrailMaximumReplicationFactorTest()
+    {
+        super(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD,
+              MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD,
+              Guardrails.maximumReplicationFactor,
+              Guardrails::setMaximumReplicationFactorThreshold,
+              Guardrails::getMaximumReplicationFactorWarnThreshold,
+              Guardrails::getMaximumReplicationFactorFailThreshold);
+    }
+
+    @After
+    public void cleanupTest() throws Throwable
+    {
+        execute("DROP KEYSPACE IF EXISTS ks");
+        DatabaseDescriptor.setDefaultKeyspaceRF(1);
+    }
+
+    @Override
+    protected long currentValue()
+    {
+        return Long.parseLong((Keyspace.open("ks").getReplicationStrategy()).configOptions.get("datacenter1"));
+    }
+
+    @Override
+    protected List<String> getWarnings()
+    {
+        List<String> warnings = ClientWarn.instance.getWarnings();
+
+        // filtering out non-guardrails produced warnings
+        return warnings == null
+               ? Collections.emptyList()
+               : warnings.stream()
+                         .filter(w -> !w.contains("keyspace ks is higher than the number of nodes 1 for datacenter1") &&
+                                      !w.contains("When increasing replication factor you need to run a full (-full) repair to distribute the data") &&
+                                      !w.contains("keyspace ks is higher than the number of nodes") &&
+                                      !w.contains("Your replication factor 3 for keyspace ks is higher than the number of nodes 2 for datacenter datacenter2"))
+                         .collect(Collectors.toList());
+    }
+
+    @Test
+    public void testMaxKeyspaceRFDisabled() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(DISABLED_GUARDRAIL, DISABLED_GUARDRAIL);
+        assertMaxThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 6}");
+        assertMaxThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 10}");
+    }
+
+    @Test
+    public void testSimpleStrategyCreate() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}", 3);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 5}", 5);
+    }
+
+    @Test
+    public void testSimpleStrategyAlter() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 2}");
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}", 3);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 5}", 5);
+    }
+
+    @Test
+    public void testMultipleDatacenter() throws Throwable
+    {
+        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
+        DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch()
+        {
+            public static final String RACK1 = ServerTestUtils.RACK1;
+
+            @Override
+            public String getRack(InetAddressAndPort endpoint) { return RACK1; }
+
+            @Override
+            public String getDatacenter(InetAddressAndPort endpoint) { return "datacenter2"; }
+
+            @Override
+            public int compareEndpoints(InetAddressAndPort target, Replica a1, Replica a2) { return 0; }
+        });
+
+        List<String> twoWarnings = Arrays.asList(format("The keyspace ks has a replication factor of 3, above the warning threshold of %s.", MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD),
+                                                 format("The keyspace ks has a replication factor of 3, above the warning threshold of %s.", MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+
+        StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.255"));
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertValid("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2}");
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 3}", 3);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 3, 'datacenter2' : 3}", twoWarnings);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 5}", 5);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 5, 'datacenter2' : 5}", 5);
+        execute("DROP KEYSPACE IF EXISTS ks");
+
+        execute("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2' : 1}");
+        assertValid("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2}");
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 3}", 3);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 3, 'datacenter2' : 3}", twoWarnings);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 5}", 5);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 5, 'datacenter2' : 5}", 5);
+
+        DatabaseDescriptor.setEndpointSnitch(snitch);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFOnlyWarnBelow() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, DISABLED_GUARDRAIL);
+        assertMaxThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}");
+        assertMaxThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}");
+    }
+
+    @Test
+    public void testMaxKeyspaceRFOnlyWarnAbove() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, DISABLED_GUARDRAIL);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}", 3);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}", 4);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFOnlyFailBelow() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(DISABLED_GUARDRAIL, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertMaxThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}");
+        assertMaxThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}");
+    }
+
+    @Test
+    public void testMaxKeyspaceRFOnlyFailAbove() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(DISABLED_GUARDRAIL, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 5}", 5);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFOnlyFailAboveAlter() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(DISABLED_GUARDRAIL, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}");
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 6}", 6);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFWarnBelow() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertMaxThresholdValid("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}");
+        assertMaxThresholdValid("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}");
+    }
+
+    @Test
+    public void testMaxKeyspaceRFWarnFailBetween() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}", 3);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}", 4);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFFailAbove() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 5}", 5);
+    }
+
+    @Test
+    public void testMaxKeyspaceRFFailAboveAlter() throws Throwable
+    {
+        guardrails().setMaximumReplicationFactorThreshold(MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}");
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 5}", 5);
+    }
+
+    @Test
+    public void testMaxRFLesserThanDefaultRF()
+    {
+        DatabaseDescriptor.setDefaultKeyspaceRF(3);
+        Assertions.assertThatThrownBy(() -> guardrails().setMaximumReplicationFactorThreshold(1, 2))
+                  .isInstanceOf(IllegalArgumentException.class)
+                  .hasMessageContaining("maximum_replication_factor_fail_threshold to be set (2) cannot be lesser than default_keyspace_rf (3)");
+
+        DatabaseDescriptor.setDefaultKeyspaceRF(1);
+        guardrails().setMaximumReplicationFactorThreshold(1, 2);
+        Assertions.assertThatThrownBy(() -> DatabaseDescriptor.setDefaultKeyspaceRF(3))
+                  .isInstanceOf(IllegalArgumentException.class)
+                  .hasMessageContaining("default_keyspace_rf to be set (3) cannot be greater than maximum_replication_factor_fail_threshold (2)");
+    }
+
+    private void assertWarns(String query, int rf) throws Throwable
+    {
+        assertWarns(query, format("The keyspace ks has a replication factor of %d, above the warning threshold of %s.",
+                                  rf, MAXIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+    }
+
+    private void assertFails(String query, int rf) throws Throwable
+    {
+        assertFails(query, format("The keyspace ks has a replication factor of %d, above the failure threshold of %s.",
+                                  rf, MAXIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
index 9c984e9..8817f9a 100644
--- a/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailMinimumReplicationFactorTest.java
@@ -31,7 +31,6 @@
 import org.apache.cassandra.ServerTestUtils;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.locator.AbstractEndpointSnitch;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.InetAddressAndPort;
@@ -90,6 +89,7 @@
     {
         List<String> warnings = ClientWarn.instance.getWarnings();
 
+        // filtering out non-guardrails produced warnings
         return warnings == null
                ? Collections.emptyList()
                : warnings.stream()
@@ -133,13 +133,21 @@
     }
 
     @Test
-    public void testSimpleStrategy() throws Throwable
+    public void testSimpleStrategyCreate() throws Throwable
     {
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}",
-                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
-        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1}",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}", 3);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1}", 1);
+    }
+
+    @Test
+    public void testSimpleStrategyAlter() throws Throwable
+    {
+        guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
+        execute("CREATE KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 4}");
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 3}", 3);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1}", 1);
     }
 
     @Test
@@ -162,17 +170,26 @@
 
         List<String> twoWarnings = Arrays.asList(format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD),
                                                  format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
-        
+
         StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.255"));
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        assertValid("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 4 };");
-        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 2 };",
-                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
-        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2 };", twoWarnings);
-        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 1 };",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %d.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
-        assertFails("CREATE KEYSPACE ks1 WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2' : 1 };",
-                    format("The keyspace ks1 has a replication factor of 1, below the failure threshold of %d.", MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertValid("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 4 }");
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 2 }", 2);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2 }", twoWarnings);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 1 }", 1);
+        execute("DROP KEYSPACE IF EXISTS ks");
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2' : 1 }", 1);
+        execute("DROP KEYSPACE IF EXISTS ks");
+
+        execute("CREATE KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 5, 'datacenter2' : 5}");
+        assertValid("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 4 }");
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 2 }", 2);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 2, 'datacenter2' : 2 }", twoWarnings);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 4, 'datacenter2' : 1 }", 1);
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class' : 'NetworkTopologyStrategy', 'datacenter1': 1, 'datacenter2' : 1 }", 1);
 
         DatabaseDescriptor.setEndpointSnitch(snitch);
         execute("DROP KEYSPACE IF EXISTS ks1");
@@ -190,10 +207,8 @@
     public void testMinKeyspaceRFOnlyWarnBelow() throws Throwable
     {
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, DISABLED_GUARDRAIL);
-        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}",
-                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
-        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}",
-                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}", 3);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}", 2);
     }
 
     @Test
@@ -208,8 +223,7 @@
     public void testMinKeyspaceRFOnlyFailBelow() throws Throwable
     {
         guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}", 1);
     }
 
     @Test
@@ -217,8 +231,7 @@
     {
         guardrails().setMinimumReplicationFactorThreshold(DISABLED_GUARDRAIL, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
         execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}");
-        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}", 1);
     }
 
     @Test
@@ -233,18 +246,15 @@
     public void testMinKeyspaceRFWarnFailBetween() throws Throwable
     {
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}",
-                    format("The keyspace %s has a replication factor of 3, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
-        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}",
-                    format("The keyspace %s has a replication factor of 2, below the warning threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+        assertWarns("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 3}", 3);
+        assertWarns("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 2}", 2);
     }
 
     @Test
     public void testMinKeyspaceRFFailBelow() throws Throwable
     {
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertFails("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}", 1);
     }
 
     @Test
@@ -252,26 +262,33 @@
     {
         guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
         execute("CREATE KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 4}");
-        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}",
-                    format("The keyspace %s has a replication factor of 1, below the failure threshold of %s.", KS, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
+        assertFails("ALTER KEYSPACE ks WITH replication = { 'class': 'NetworkTopologyStrategy', 'datacenter1': 1}", 1);
     }
 
     @Test
     public void testMinRFGreaterThanDefaultRF()
     {
-        try
-        {
-            DatabaseDescriptor.setDefaultKeyspaceRF(1);
-            guardrails().setMinimumReplicationFactorThreshold(MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD);
-        }
-        catch (IllegalArgumentException e)
-        {
-            String expectedMessage = "";
+        DatabaseDescriptor.setDefaultKeyspaceRF(3);
+        Assertions.assertThatThrownBy(() -> guardrails().setMinimumReplicationFactorThreshold(5, 4))
+                  .isInstanceOf(IllegalArgumentException.class)
+                  .hasMessageContaining("minimum_replication_factor_fail_threshold to be set (4) cannot be greater than default_keyspace_rf (3)");
 
-            if(guardrails().getMinimumReplicationFactorFailThreshold() > DatabaseDescriptor.getDefaultKeyspaceRF())
-                expectedMessage = format("%s_fail_threshold to be set (%d) cannot be greater than default_keyspace_rf (%d)",
-                                         WHAT, guardrails().getMinimumReplicationFactorFailThreshold(), DatabaseDescriptor.getDefaultKeyspaceRF());
-            Assertions.assertThat(e.getMessage()).contains(expectedMessage);
-        }
+        DatabaseDescriptor.setDefaultKeyspaceRF(6);
+        guardrails().setMinimumReplicationFactorThreshold(5, 4);
+        Assertions.assertThatThrownBy(() -> DatabaseDescriptor.setDefaultKeyspaceRF(3))
+                  .isInstanceOf(IllegalArgumentException.class)
+                  .hasMessageContaining("default_keyspace_rf to be set (3) cannot be less than minimum_replication_factor_fail_threshold (4)");
+    }
+
+    private void assertWarns(String query, int rf) throws Throwable
+    {
+        assertWarns(query, format("The keyspace ks has a replication factor of %d, below the warning threshold of %s.",
+                                  rf, MINIMUM_REPLICATION_FACTOR_WARN_THRESHOLD));
+    }
+
+    private void assertFails(String query, int rf) throws Throwable
+    {
+        assertFails(query, format("The keyspace ks has a replication factor of %d, below the failure threshold of %s.",
+                                  rf, MINIMUM_REPLICATION_FACTOR_FAIL_THRESHOLD));
     }
 }
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailSimpleStrategyTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSimpleStrategyTest.java
new file mode 100644
index 0000000..3cc6bc7
--- /dev/null
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailSimpleStrategyTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.db.guardrails;
+
+import org.junit.After;
+import org.junit.Test;
+
+public class GuardrailSimpleStrategyTest extends GuardrailTester
+{
+    public static String ERROR_MSG = "SimpleStrategy is not allowed";
+
+    public GuardrailSimpleStrategyTest()
+    {
+        super(Guardrails.simpleStrategyEnabled);
+    }
+
+    private void setGuardrail(boolean enabled)
+    {
+        guardrails().setSimpleStrategyEnabled(enabled);
+    }
+
+    @After
+    public void afterTest() throws Throwable
+    {
+        setGuardrail(true);
+        execute("DROP KEYSPACE IF EXISTS test_ss;");
+    }
+
+    @Test
+    public void testCanCreateWithGuardrailEnabled() throws Throwable
+    {
+        assertValid("CREATE KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};");
+    }
+
+    @Test
+    public void testCanAlterWithGuardrailEnabled() throws Throwable
+    {
+        execute("CREATE KEYSPACE test_ss WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0};");
+        assertValid("ALTER KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};");
+    }
+
+    @Test
+    public void testGuardrailBlocksCreate() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("CREATE KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};", ERROR_MSG);
+    }
+
+    @Test
+    public void testGuardrailBlocksAlter() throws Throwable
+    {
+        setGuardrail(false);
+        execute("CREATE KEYSPACE test_ss WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0};");
+        assertFails("ALTER KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};", ERROR_MSG);
+    }
+
+    @Test
+    public void testToggle() throws Throwable
+    {
+        setGuardrail(false);
+        assertFails("CREATE KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};", ERROR_MSG);
+
+        setGuardrail(true);
+        assertValid("CREATE KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};");
+        execute("ALTER KEYSPACE test_ss WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1':2, 'datacenter2':0};");
+
+        setGuardrail(false);
+        assertFails("ALTER KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};", ERROR_MSG);
+
+        setGuardrail(true);
+        assertValid("ALTER KEYSPACE test_ss WITH replication = {'class': 'SimpleStrategy'};");
+    }
+}
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
index 7c94702..5452374 100644
--- a/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailTester.java
@@ -67,6 +67,18 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * This class provides specific utility methods for testing Guardrails that should be used instead of the provided
+ * {@link CQLTester} methods. Many of the methods in CQLTester don't respect the {@link ClientState} provided for a query
+ * and instead use {@link ClientState#forInternalCalls()}, which flags the query as internal and thus bypasses auth and
+ * guardrail checks.
+ *
+ * Some GuardrailTester methods and their usage are as follows:
+ *      {@link GuardrailTester#assertValid(String)} to confirm the query as structured is valid given the state of the db
+ *      {@link GuardrailTester#assertWarns(String, String)} to confirm a query succeeds but emits the warning text provided
+ *      {@link GuardrailTester#assertFails(String, String)} to confirm a query fails with the message provided
+ *      {@link GuardrailTester#testExcludedUsers} to confirm superusers are excluded from application of the guardrail
+ */
 public abstract class GuardrailTester extends CQLTester
 {
     // Name used when testing CREATE TABLE that should be aborted (we need to provide it as assertFails, which
@@ -318,6 +330,10 @@
         assertFails(function, true, messages, redactedMessages);
     }
 
+    /**
+     * Unlike {@link CQLTester#assertInvalidThrowMessage}, the chain of methods ending here in {@link GuardrailTester}
+     * respects the input ClientState, so guardrail permissions will be correctly checked.
+     */
     protected void assertFails(CheckedFunction function, boolean thrown, List<String> messages, List<String> redactedMessages) throws Throwable
     {
         ClientWarn.instance.captureWarnings();
@@ -478,6 +494,10 @@
         return execute(state, query, options);
     }
 
+    /**
+     * Performs execution of the query using the input {@link ClientState} (rather than {@link ClientState#forInternalCalls()},
+     * which bypasses them) to ensure guardrails are appropriately applied to the query provided.
+     */
     protected ResultMessage execute(ClientState state, String query, QueryOptions options)
     {
         QueryState queryState = new QueryState(state);
diff --git a/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
index a0a5823..5c7e724 100644
--- a/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
+++ b/test/unit/org/apache/cassandra/db/guardrails/GuardrailsTest.java
@@ -67,8 +67,8 @@
         MaxThreshold guard = new MaxThreshold("x",
                                         state -> 10,
                                         state -> 100,
-                                        (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
-                                                                       isWarn ? "Warning" : "Aborting", what, v, t));
+                                        (isWarn, featureName, v, t) -> format("%s: for %s, %s > %s",
+                                                                       isWarn ? "Warning" : "Aborting", featureName, v, t));
 
         assertTrue(guard.enabled(userClientState));
 
@@ -93,8 +93,8 @@
         MaxThreshold guard = new MaxThreshold("x",
                                         state -> 10,
                                         state -> DISABLED,
-                                        (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
-                                                                       isWarn ? "Warning" : "Aborting", what, v, t));
+                                        (isWarn, featureName, v, t) -> format("%s: for %s, %s > %s",
+                                                                       isWarn ? "Warning" : "Aborting", featureName, v, t));
 
         assertTrue(guard.enabled(userClientState));
 
@@ -111,8 +111,8 @@
         MaxThreshold guard = new MaxThreshold("x",
                                         state -> DISABLED,
                                         state -> 10,
-                                        (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
-                                                                       isWarn ? "Warning" : "Aborting", what, v, t));
+                                        (isWarn, featureName, v, t) -> format("%s: for %s, %s > %s",
+                                                                       isWarn ? "Warning" : "Aborting", featureName, v, t));
 
         assertTrue(guard.enabled(userClientState));
 
@@ -129,8 +129,8 @@
         MaxThreshold guard = new MaxThreshold("x",
                                         state -> 10,
                                         state -> 100,
-                                        (isWarn, what, v, t) -> format("%s: for %s, %s > %s",
-                                                                       isWarn ? "Warning" : "Failure", what, v, t));
+                                        (isWarn, featureName, v, t) -> format("%s: for %s, %s > %s",
+                                                                       isWarn ? "Warning" : "Failure", featureName, v, t));
 
         // value under both thresholds
         assertValid(() -> guard.guard(5, "x", false, null));
@@ -251,25 +251,25 @@
     }
 
     @Test
-    public void testDisableFlag() throws Throwable
+    public void testEnableFlag() throws Throwable
     {
-        assertFails(() -> new DisableFlag("x", state -> true, "X").ensureEnabled(userClientState), "X is not allowed");
-        assertValid(() -> new DisableFlag("x", state -> false, "X").ensureEnabled(userClientState));
+        assertFails(() -> new EnableFlag("x", state -> false, "X").ensureEnabled(userClientState), "X is not allowed");
+        assertValid(() -> new EnableFlag("x", state -> true, "X").ensureEnabled(userClientState));
 
-        assertFails(() -> new DisableFlag("x", state -> true, "X").ensureEnabled("Y", userClientState), "Y is not allowed");
-        assertValid(() -> new DisableFlag("x", state -> false, "X").ensureEnabled("Y", userClientState));
+        assertFails(() -> new EnableFlag("x", state -> false, "X").ensureEnabled("Y", userClientState), "Y is not allowed");
+        assertValid(() -> new EnableFlag("x", state -> true, "X").ensureEnabled("Y", userClientState));
     }
 
     @Test
-    public void testDisableFlagUsers() throws Throwable
+    public void testEnableFlagUsers() throws Throwable
     {
-        DisableFlag enabled = new DisableFlag("x", state -> false, "X");
+        EnableFlag enabled = new EnableFlag("x", state -> true, "X");
         assertValid(() -> enabled.ensureEnabled(null));
         assertValid(() -> enabled.ensureEnabled(userClientState));
         assertValid(() -> enabled.ensureEnabled(systemClientState));
         assertValid(() -> enabled.ensureEnabled(superClientState));
 
-        DisableFlag disabled = new DisableFlag("x", state -> true, "X");
+        EnableFlag disabled = new EnableFlag("x", state -> false, "X");
         assertFails(() -> disabled.ensureEnabled(userClientState), "X is not allowed");
         assertValid(() -> disabled.ensureEnabled(systemClientState));
         assertValid(() -> disabled.ensureEnabled(superClientState));
diff --git a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
index d22a8ac..69c1eb5 100644
--- a/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/DynamicCompositeTypeTest.java
@@ -47,7 +47,7 @@
 {
     private static final String KEYSPACE1 = "DynamicCompositeType";
     private static final String CF_STANDARDDYNCOMPOSITE = "StandardDynamicComposite";
-    private static Map<Byte, AbstractType<?>> aliases = new HashMap<>();
+    public static Map<Byte, AbstractType<?>> aliases = new HashMap<>();
 
     private static final DynamicCompositeType comparator;
     static
@@ -60,7 +60,7 @@
     }
 
     private static final int UUID_COUNT = 3;
-    private static final UUID[] uuids = new UUID[UUID_COUNT];
+    public static final UUID[] uuids = new UUID[UUID_COUNT];
     static
     {
         for (int i = 0; i < UUID_COUNT; ++i)
@@ -320,13 +320,12 @@
         assert !TypeParser.parse("DynamicCompositeType(a => BytesType)").isCompatibleWith(TypeParser.parse("DynamicCompositeType(a => BytesType, b => AsciiType)"));
     }
 
-    private ByteBuffer createDynamicCompositeKey(String s, UUID uuid, int i, boolean lastIsOne)
+    private static ByteBuffer createDynamicCompositeKey(String s, UUID uuid, int i, boolean lastIsOne)
     {
         return createDynamicCompositeKey(s, uuid, i, lastIsOne, false);
     }
 
-    private ByteBuffer createDynamicCompositeKey(String s, UUID uuid, int i, boolean lastIsOne,
-            final boolean reversed)
+    public static ByteBuffer createDynamicCompositeKey(String s, UUID uuid, int i, boolean lastIsOne, boolean reversed)
     {
         String intType = (reversed ? "ReversedType(IntegerType)" : "IntegerType");
         ByteBuffer bytes = ByteBufferUtil.bytes(s);
diff --git a/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java b/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
index 4d25a1f..474b867 100644
--- a/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
+++ b/test/unit/org/apache/cassandra/db/marshal/TypeValidationTest.java
@@ -204,7 +204,7 @@
         qt().forAll(tupleWithValueGen(baseGen)).checkAssert(pair -> {
             TupleType tuple = pair.left;
             ByteBuffer value = pair.right;
-            Assertions.assertThat(TupleType.buildValue(tuple.split(value)))
+            Assertions.assertThat(TupleType.buildValue(tuple.split(ByteBufferAccessor.instance, value)))
                       .as("TupleType.buildValue(split(value)) == value")
                       .isEqualTo(value);
         });
diff --git a/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java b/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
index 5b5365d..c24690b 100644
--- a/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
+++ b/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
@@ -27,6 +27,7 @@
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.marshal.IntegerType;
 import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -36,6 +37,8 @@
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 import org.apache.cassandra.utils.FBUtilities;
 
 /**
@@ -124,5 +127,11 @@
         {
             return 0;
         }
+
+        @Override
+        public ByteSource asComparableBytes(ByteComparable.Version version)
+        {
+            return IntegerType.instance.asComparableBytes(IntegerType.instance.decompose(token), version);
+        }
     }
 }
diff --git a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
index c4e5db8..ca6504c 100644
--- a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
+++ b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
@@ -34,6 +34,8 @@
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable;
+import org.apache.cassandra.utils.bytecomparable.ByteSource;
 
 public class LengthPartitioner implements IPartitioner
 {
@@ -95,6 +97,11 @@
             return new BigIntegerToken(new BigInteger(ByteBufferUtil.getArray(bytes)));
         }
 
+        public Token fromComparableBytes(ByteSource.Peekable comparableBytes, ByteComparable.Version version)
+        {
+            return fromByteArray(IntegerType.instance.fromComparableBytes(comparableBytes, version));
+        }
+
         public String toString(Token token)
         {
             BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
index c941a81..0af6d24 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
@@ -24,6 +24,7 @@
 
 import com.google.common.io.Files;
 
+import org.apache.cassandra.db.rows.Row;
 import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
@@ -40,7 +41,6 @@
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.streaming.StreamEvent;
@@ -93,16 +93,33 @@
     @After
     public void cleanup()
     {
-        try {
-            FileUtils.deleteRecursive(tmpdir);
-        } catch (FSWriteError e) {
+        try
+        {
+            tmpdir.deleteRecursive();
+        }
+        catch (FSWriteError e)
+        {
             /*
               We force a GC here to force buffer deallocation, and then try deleting the directory again.
               For more information, see: http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4715154
               If this is not the problem, the exception will be rethrown anyway.
              */
             System.gc();
-            FileUtils.deleteRecursive(tmpdir);
+            tmpdir.deleteRecursive();
+        }
+
+        try
+        {
+            for (String[] keyspaceTable : new String[][] { {KEYSPACE1, CF_STANDARD1},
+                                                           {KEYSPACE1, CF_STANDARD2},
+                                                           {KEYSPACE1, CF_BACKUPS},
+                                                           {KEYSPACE2, CF_STANDARD1},
+                                                           {KEYSPACE2, CF_STANDARD2}})
+            StorageService.instance.truncate(keyspaceTable[0], keyspaceTable[1]);
+        }
+        catch (Exception ex)
+        {
+            throw new RuntimeException("Unable to truncate table!", ex);
         }
     }
 
@@ -150,9 +167,11 @@
         assertEquals(1, partitions.size());
         assertEquals("key1", AsciiType.instance.getString(partitions.get(0).partitionKey().getKey()));
         assert metadata != null;
-        assertEquals(ByteBufferUtil.bytes("100"), partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")))
-                                                            .getCell(metadata.getColumn(ByteBufferUtil.bytes("val")))
-                                                            .buffer());
+
+        Row row = partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")));
+        assert row != null;
+
+        assertEquals(ByteBufferUtil.bytes("100"), row.getCell(metadata.getColumn(ByteBufferUtil.bytes("val"))).buffer());
 
         // The stream future is signalled when the work is complete but before releasing references. Wait for release
         // before cleanup (CASSANDRA-10118).
@@ -168,7 +187,7 @@
                                                   .inDirectory(dataDir)
                                                   .forTable(String.format(schema, KEYSPACE1, CF_STANDARD2))
                                                   .using(String.format(query, KEYSPACE1, CF_STANDARD2))
-                                                  .withBufferSizeInMB(1)
+                                                  .withBufferSizeInMiB(1)
                                                   .build();
 
         int NB_PARTITIONS = 5000; // Enough to write >1MiB and get at least one completed sstable before we've closed the writer
@@ -209,10 +228,9 @@
     }
 
     @Test
-    public void testLoadingSSTableToDifferentKeyspace() throws Exception
+    public void testLoadingSSTableToDifferentKeyspaceAndTable() throws Exception
     {
-        File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + KEYSPACE1 + File.pathSeparator() + CF_STANDARD1);
-        assert dataDir.tryCreateDirectories();
+        File dataDir = dataDir(CF_STANDARD1);
         TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD1);
 
         String schema = "CREATE TABLE %s.%s (key ascii, name ascii, val ascii, val1 ascii, PRIMARY KEY (key, name))";
@@ -230,25 +248,31 @@
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
         Util.flush(cfs); // wait for sstables to be on disk else we won't be able to stream them
 
-        final CountDownLatch latch = new CountDownLatch(1);
-        SSTableLoader loader = new SSTableLoader(dataDir, new TestClient(), new OutputHandler.SystemOutput(false, false), 1, KEYSPACE2);
-        loader.stream(Collections.emptySet(), completionStreamListener(latch)).get();
+        for (String table : new String[] { CF_STANDARD2, null })
+        {
+            final CountDownLatch latch = new CountDownLatch(1);
+            SSTableLoader loader = new SSTableLoader(dataDir, new TestClient(), new OutputHandler.SystemOutput(false, false), 1, KEYSPACE2, table);
+            loader.stream(Collections.emptySet(), completionStreamListener(latch)).get();
 
-        cfs = Keyspace.open(KEYSPACE2).getColumnFamilyStore(CF_STANDARD1);
-        Util.flush(cfs);
+            String targetTable = table == null ? CF_STANDARD1 : table;
+            cfs = Keyspace.open(KEYSPACE2).getColumnFamilyStore(targetTable);
+            Util.flush(cfs);
 
-        List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).build());
+            List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).build());
 
-        assertEquals(1, partitions.size());
-        assertEquals("key1", AsciiType.instance.getString(partitions.get(0).partitionKey().getKey()));
-        assert metadata != null;
-        assertEquals(ByteBufferUtil.bytes("100"), partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")))
-                                                            .getCell(metadata.getColumn(ByteBufferUtil.bytes("val")))
-                                                            .buffer());
+            assertEquals(1, partitions.size());
+            assertEquals("key1", AsciiType.instance.getString(partitions.get(0).partitionKey().getKey()));
+            assert metadata != null;
 
-        // The stream future is signalled when the work is complete but before releasing references. Wait for release
-        // before cleanup (CASSANDRA-10118).
-        latch.await();
+            Row row = partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")));
+            assert row != null;
+
+            assertEquals(ByteBufferUtil.bytes("100"), row.getCell(metadata.getColumn(ByteBufferUtil.bytes("val"))).buffer());
+
+            // The stream future is signalled when the work is complete but before releasing references. Wait for release
+            // before cleanup (CASSANDRA-10118).
+            latch.await();
+        }
     }
 
     @Test
@@ -278,9 +302,11 @@
         assertEquals(1, partitions.size());
         assertEquals("key", AsciiType.instance.getString(partitions.get(0).partitionKey().getKey()));
         assert metadata != null;
-        assertEquals(ByteBufferUtil.bytes("100"), partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")))
-                                                            .getCell(metadata.getColumn(ByteBufferUtil.bytes("val")))
-                                                            .buffer());
+
+        Row row = partitions.get(0).getRow(Clustering.make(ByteBufferUtil.bytes("col1")));
+        assert row != null;
+
+        assertEquals(ByteBufferUtil.bytes("100"), row.getCell(metadata.getColumn(ByteBufferUtil.bytes("val"))).buffer());
 
         // The stream future is signalled when the work is complete but before releasing references. Wait for release
         // before cleanup (CASSANDRA-10118).
diff --git a/test/unit/org/apache/cassandra/net/MessagingServiceTest.java b/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
index 349d865..32d5050 100644
--- a/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
+++ b/test/unit/org/apache/cassandra/net/MessagingServiceTest.java
@@ -25,6 +25,7 @@
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.channels.AsynchronousSocketChannel;
+import java.security.cert.Certificate;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
@@ -35,31 +36,35 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.*;
 import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import com.google.common.net.InetAddresses;
-
-import com.codahale.metrics.Timer;
-
-import org.apache.cassandra.auth.IInternodeAuthenticator;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
-import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.metrics.MessagingMetrics;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.locator.InetAddressAndPort;
-import org.apache.cassandra.utils.FBUtilities;
-import org.awaitility.Awaitility;
-import org.caffinitas.ohc.histo.EstimatedHistogram;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.codahale.metrics.Timer;
+import org.apache.cassandra.auth.IInternodeAuthenticator;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.locator.InetAddressAndPort;
+import org.apache.cassandra.metrics.MessagingMetrics;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.awaitility.Awaitility;
+import org.caffinitas.ohc.histo.EstimatedHistogram;
+
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 public class MessagingServiceTest
 {
@@ -67,7 +72,8 @@
     public static AtomicInteger rejectedConnections = new AtomicInteger();
     public static final IInternodeAuthenticator ALLOW_NOTHING_AUTHENTICATOR = new IInternodeAuthenticator()
     {
-        public boolean authenticate(InetAddress remoteAddress, int remotePort)
+        public boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                    Certificate[] certificates, InternodeConnectionDirection connectionType)
         {
             rejectedConnections.incrementAndGet();
             return false;
@@ -78,6 +84,25 @@
 
         }
     };
+
+    public static final IInternodeAuthenticator REJECT_OUTBOUND_AUTHENTICATOR = new IInternodeAuthenticator()
+    {
+        public boolean authenticate(InetAddress remoteAddress, int remotePort,
+                                    Certificate[] certificates, InternodeConnectionDirection connectionType)
+        {
+            if (connectionType == InternodeConnectionDirection.OUTBOUND)
+            {
+                rejectedConnections.incrementAndGet();
+                return false;
+            }
+            return true;
+        }
+
+        public void validateConfiguration() throws ConfigurationException
+        {
+
+        }
+    };
     private static IInternodeAuthenticator originalAuthenticator;
     private static ServerEncryptionOptions originalServerEncryptionOptions;
     private static InetAddressAndPort originalListenAddress;
@@ -228,19 +253,38 @@
     @Test
     public void testFailedOutboundInternodeAuth() throws Exception
     {
-        MessagingService ms = MessagingService.instance();
-        DatabaseDescriptor.setInternodeAuthenticator(ALLOW_NOTHING_AUTHENTICATOR);
-        InetAddressAndPort address = InetAddressAndPort.getByName("127.0.0.250");
+        // Listen on serverside for connections
+        ServerEncryptionOptions serverEncryptionOptions = new ServerEncryptionOptions()
+        .withInternodeEncryption(ServerEncryptionOptions.InternodeEncryption.none);
 
-        //Should return null
-        int rejectedBefore = rejectedConnections.get();
-        Message<?> messageOut = Message.out(Verb.ECHO_REQ, NoPayload.noPayload);
-        ms.send(messageOut, address);
-        Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> rejectedConnections.get() > rejectedBefore);
+        DatabaseDescriptor.setInternodeAuthenticator(REJECT_OUTBOUND_AUTHENTICATOR);
+        InetAddress listenAddress = FBUtilities.getJustLocalAddress();
 
-        //Should tolerate null
-        ms.closeOutbound(address);
-        ms.send(messageOut, address);
+        InboundConnectionSettings settings = new InboundConnectionSettings().withEncryption(serverEncryptionOptions);
+        InboundSockets connections = new InboundSockets(settings);
+
+        try
+        {
+            connections.open().await();
+            Assert.assertTrue(connections.isListening());
+
+            MessagingService ms = MessagingService.instance();
+            //Should return null
+            int rejectedBefore = rejectedConnections.get();
+            Message<?> messageOut = Message.out(Verb.ECHO_REQ, NoPayload.noPayload);
+            InetAddressAndPort address = InetAddressAndPort.getByAddress(listenAddress);
+            ms.send(messageOut, address);
+            Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> rejectedConnections.get() > rejectedBefore);
+
+            //Should tolerate null
+            ms.closeOutbound(address);
+            ms.send(messageOut, address);
+        }
+        finally
+        {
+            connections.close().await();
+            Assert.assertFalse(connections.isListening());
+        }
     }
 
     @Test
@@ -262,6 +306,11 @@
 
             int rejectedBefore = rejectedConnections.get();
             Future<Void> connectFuture = testChannel.connect(new InetSocketAddress(listenAddress, DatabaseDescriptor.getStoragePort()));
+            Awaitility.await().atMost(10, TimeUnit.SECONDS).until(connectFuture::isDone);
+
+            // Since authentication doesn't happen during connect, try writing a dummy string which triggers
+            // authentication handler.
+            testChannel.write(ByteBufferUtil.bytes("dummy string"));
             Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> rejectedConnections.get() > rejectedBefore);
 
             connectFuture.cancel(true);
diff --git a/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java b/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
index 13d1fac..3edf9c1 100644
--- a/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
+++ b/test/unit/org/apache/cassandra/security/DefaultSslContextFactoryTest.java
@@ -53,15 +53,23 @@
         config.put("keystore_password", "cassandra");
     }
 
+    private void addOutboundKeystoreOptions(Map<String, Object> config)
+    {
+        config.put("outbound_keystore", "test/conf/cassandra_ssl_test_outbound.keystore");
+        config.put("outbound_keystore_password", "cassandra");
+    }
+
     @Test
     public void getSslContextOpenSSL() throws IOException
     {
-        EncryptionOptions options = new EncryptionOptions().withTrustStore("test/conf/cassandra_ssl_test.truststore")
-                                                           .withTrustStorePassword("cassandra")
-                                                           .withKeyStore("test/conf/cassandra_ssl_test.keystore")
-                                                           .withKeyStorePassword("cassandra")
-                                                           .withRequireClientAuth(false)
-                                                           .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA");
+        EncryptionOptions.ServerEncryptionOptions options = new EncryptionOptions.ServerEncryptionOptions().withTrustStore("test/conf/cassandra_ssl_test.truststore")
+                                                                                                           .withTrustStorePassword("cassandra")
+                                                                                                           .withKeyStore("test/conf/cassandra_ssl_test.keystore")
+                                                                                                           .withKeyStorePassword("cassandra")
+                                                                                                           .withOutboundKeystore("test/conf/cassandra_ssl_test_outbound.keystore")
+                                                                                                           .withOutboundKeystorePassword("cassandra")
+                                                                                                           .withRequireClientAuth(false)
+                                                                                                           .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA");
         SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
         Assert.assertNotNull(sslContext);
         if (OpenSsl.isAvailable())
@@ -78,7 +86,7 @@
         config.put("truststore", "/this/is/probably/not/a/file/on/your/test/machine");
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
-        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.keystoreContext.checkedExpiry = false;
         defaultSslContextFactoryImpl.buildTrustManagerFactory();
     }
 
@@ -90,7 +98,7 @@
         config.put("truststore_password", "HomeOfBadPasswords");
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
-        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.keystoreContext.checkedExpiry = false;
         defaultSslContextFactoryImpl.buildTrustManagerFactory();
     }
 
@@ -101,7 +109,7 @@
         config.putAll(commonConfig);
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
-        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.keystoreContext.checkedExpiry = false;
         TrustManagerFactory trustManagerFactory = defaultSslContextFactoryImpl.buildTrustManagerFactory();
         Assert.assertNotNull(trustManagerFactory);
     }
@@ -114,7 +122,7 @@
         config.put("keystore", "/this/is/probably/not/a/file/on/your/test/machine");
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
-        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.keystoreContext.checkedExpiry = false;
         defaultSslContextFactoryImpl.buildKeyManagerFactory();
     }
 
@@ -138,20 +146,70 @@
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
         // Make sure the exiry check didn't happen so far for the private key
-        Assert.assertFalse(defaultSslContextFactoryImpl.checkedExpiry);
+        Assert.assertFalse(defaultSslContextFactoryImpl.keystoreContext.checkedExpiry);
 
         addKeystoreOptions(config);
         DefaultSslContextFactory defaultSslContextFactoryImpl2 = new DefaultSslContextFactory(config);
         // Trigger the private key loading. That will also check for expired private key
         defaultSslContextFactoryImpl2.buildKeyManagerFactory();
         // Now we should have checked the private key's expiry
-        Assert.assertTrue(defaultSslContextFactoryImpl2.checkedExpiry);
+        Assert.assertTrue(defaultSslContextFactoryImpl2.keystoreContext.checkedExpiry);
 
         // Make sure that new factory object preforms the fresh private key expiry check
         DefaultSslContextFactory defaultSslContextFactoryImpl3 = new DefaultSslContextFactory(config);
-        Assert.assertFalse(defaultSslContextFactoryImpl3.checkedExpiry);
+        Assert.assertFalse(defaultSslContextFactoryImpl3.keystoreContext.checkedExpiry);
         defaultSslContextFactoryImpl3.buildKeyManagerFactory();
-        Assert.assertTrue(defaultSslContextFactoryImpl3.checkedExpiry);
+        Assert.assertTrue(defaultSslContextFactoryImpl3.keystoreContext.checkedExpiry);
+    }
+
+    @Test(expected = IOException.class)
+    public void buildOutboundKeyManagerFactoryWithInvalidKeystoreFile() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        config.put("outbound_keystore", "/this/is/probably/not/a/file/on/your/test/machine");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.outboundKeystoreContext.checkedExpiry = false;
+        defaultSslContextFactoryImpl.buildOutboundKeyManagerFactory();
+    }
+
+    @Test(expected = IOException.class)
+    public void buildOutboundKeyManagerFactoryWithBadPassword() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+        addOutboundKeystoreOptions(config);
+        config.put("outbound_keystore_password", "HomeOfBadPasswords");
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        defaultSslContextFactoryImpl.buildKeyManagerFactory();
+    }
+
+    @Test
+    public void buildOutboundKeyManagerFactoryHappyPath() throws IOException
+    {
+        Map<String, Object> config = new HashMap<>();
+        config.putAll(commonConfig);
+
+        DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
+        // Make sure the exiry check didn't happen so far for the private key
+        Assert.assertFalse(defaultSslContextFactoryImpl.outboundKeystoreContext.checkedExpiry);
+
+        addOutboundKeystoreOptions(config);
+        DefaultSslContextFactory defaultSslContextFactoryImpl2 = new DefaultSslContextFactory(config);
+        // Trigger the private key loading. That will also check for expired private key
+        defaultSslContextFactoryImpl2.buildOutboundKeyManagerFactory();
+        // Now we should have checked the private key's expiry
+        Assert.assertTrue(defaultSslContextFactoryImpl2.outboundKeystoreContext.checkedExpiry);
+        Assert.assertFalse(defaultSslContextFactoryImpl2.keystoreContext.checkedExpiry);
+
+        // Make sure that new factory object preforms the fresh private key expiry check
+        DefaultSslContextFactory defaultSslContextFactoryImpl3 = new DefaultSslContextFactory(config);
+        Assert.assertFalse(defaultSslContextFactoryImpl3.outboundKeystoreContext.checkedExpiry);
+        defaultSslContextFactoryImpl3.buildOutboundKeyManagerFactory();
+        Assert.assertTrue(defaultSslContextFactoryImpl3.outboundKeystoreContext.checkedExpiry);
+        Assert.assertFalse(defaultSslContextFactoryImpl2.keystoreContext.checkedExpiry);
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
index 243d300..f919a19 100644
--- a/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
+++ b/test/unit/org/apache/cassandra/security/PEMBasedSslContextFactoryTest.java
@@ -216,6 +216,27 @@
                                                            .withRequireClientAuth(false)
                                                            .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA")
                                                            .withSslContextFactory(sslContextFactory);
+        SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.SERVER);
+        Assert.assertNotNull(sslContext);
+        if (OpenSsl.isAvailable())
+            Assert.assertTrue(sslContext instanceof OpenSslContext);
+        else
+            Assert.assertTrue(sslContext instanceof SslContext);
+    }
+
+    @Test
+    public void getSslContextOpenSSLOutboundKeystore() throws IOException
+    {
+        ParameterizedClass sslContextFactory = new ParameterizedClass(PEMBasedSslContextFactory.class.getSimpleName()
+        , new HashMap<>());
+        EncryptionOptions.ServerEncryptionOptions options = new EncryptionOptions.ServerEncryptionOptions().withTrustStore("test/conf/cassandra_ssl_test.truststore.pem")
+                                                                                                           .withKeyStore("test/conf/cassandra_ssl_test.keystore.pem")
+                                                                                                           .withKeyStorePassword("cassandra")
+                                                                                                           .withOutboundKeystore("test/conf/cassandra_ssl_test.keystore.pem")
+                                                                                                           .withOutboundKeystorePassword("cassandra")
+                                                                                                           .withRequireClientAuth(false)
+                                                                                                           .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA")
+                                                                                                           .withSslContextFactory(sslContextFactory);
         SslContext sslContext = SSLFactory.getOrCreateSslContext(options, true, ISslContextFactory.SocketType.CLIENT);
         Assert.assertNotNull(sslContext);
         if (OpenSsl.isAvailable())
@@ -233,7 +254,7 @@
         config.put("truststore", "/this/is/probably/not/a/file/on/your/test/machine");
 
         DefaultSslContextFactory defaultSslContextFactoryImpl = new DefaultSslContextFactory(config);
-        defaultSslContextFactoryImpl.checkedExpiry = false;
+        defaultSslContextFactoryImpl.keystoreContext.checkedExpiry = false;
         defaultSslContextFactoryImpl.buildTrustManagerFactory();
     }
 
@@ -244,7 +265,7 @@
         config.putAll(commonConfig);
 
         PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
-        sslContextFactory.checkedExpiry = false;
+        sslContextFactory.keystoreContext.checkedExpiry = false;
         TrustManagerFactory trustManagerFactory = sslContextFactory.buildTrustManagerFactory();
         Assert.assertNotNull(trustManagerFactory);
     }
@@ -258,7 +279,7 @@
         addFileBaseTrustStoreOptions(config);
 
         PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
-        sslContextFactory.checkedExpiry = false;
+        sslContextFactory.keystoreContext.checkedExpiry = false;
         TrustManagerFactory trustManagerFactory = sslContextFactory.buildTrustManagerFactory();
         Assert.assertNotNull(trustManagerFactory);
     }
@@ -271,7 +292,7 @@
         config.put("keystore", "/this/is/probably/not/a/file/on/your/test/machine");
 
         PEMBasedSslContextFactory sslContextFactory = new PEMBasedSslContextFactory(config);
-        sslContextFactory.checkedExpiry = false;
+        sslContextFactory.keystoreContext.checkedExpiry = false;
         sslContextFactory.buildKeyManagerFactory();
     }
 
@@ -295,20 +316,20 @@
 
         PEMBasedSslContextFactory sslContextFactory1 = new PEMBasedSslContextFactory(config);
         // Make sure the exiry check didn't happen so far for the private key
-        Assert.assertFalse(sslContextFactory1.checkedExpiry);
+        Assert.assertFalse(sslContextFactory1.keystoreContext.checkedExpiry);
 
         addKeyStoreOptions(config);
         PEMBasedSslContextFactory sslContextFactory2 = new PEMBasedSslContextFactory(config);
         // Trigger the private key loading. That will also check for expired private key
         sslContextFactory2.buildKeyManagerFactory();
         // Now we should have checked the private key's expiry
-        Assert.assertTrue(sslContextFactory2.checkedExpiry);
+        Assert.assertTrue(sslContextFactory2.keystoreContext.checkedExpiry);
 
         // Make sure that new factory object preforms the fresh private key expiry check
         PEMBasedSslContextFactory sslContextFactory3 = new PEMBasedSslContextFactory(config);
-        Assert.assertFalse(sslContextFactory3.checkedExpiry);
+        Assert.assertFalse(sslContextFactory3.keystoreContext.checkedExpiry);
         sslContextFactory3.buildKeyManagerFactory();
-        Assert.assertTrue(sslContextFactory3.checkedExpiry);
+        Assert.assertTrue(sslContextFactory3.keystoreContext.checkedExpiry);
     }
 
     @Test(expected = IllegalArgumentException.class)
@@ -343,20 +364,20 @@
 
         PEMBasedSslContextFactory sslContextFactory1 = new PEMBasedSslContextFactory(config);
         // Make sure the expiry check didn't happen so far for the private key
-        Assert.assertFalse(sslContextFactory1.checkedExpiry);
+        Assert.assertFalse(sslContextFactory1.keystoreContext.checkedExpiry);
 
         addFileBaseKeyStoreOptions(config);
         PEMBasedSslContextFactory sslContextFactory2 = new PEMBasedSslContextFactory(config);
         // Trigger the private key loading. That will also check for expired private key
         sslContextFactory2.buildKeyManagerFactory();
         // Now we should have checked the private key's expiry
-        Assert.assertTrue(sslContextFactory2.checkedExpiry);
+        Assert.assertTrue(sslContextFactory2.keystoreContext.checkedExpiry);
 
         // Make sure that new factory object preforms the fresh private key expiry check
         PEMBasedSslContextFactory sslContextFactory3 = new PEMBasedSslContextFactory(config);
-        Assert.assertFalse(sslContextFactory3.checkedExpiry);
+        Assert.assertFalse(sslContextFactory3.keystoreContext.checkedExpiry);
         sslContextFactory3.buildKeyManagerFactory();
-        Assert.assertTrue(sslContextFactory3.checkedExpiry);
+        Assert.assertTrue(sslContextFactory3.keystoreContext.checkedExpiry);
     }
 
     @Test
diff --git a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
index e5aa4b1..ff3bab9 100644
--- a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
+++ b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java
@@ -18,11 +18,19 @@
 */
 package org.apache.cassandra.security;
 
-import org.apache.cassandra.io.util.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.Certificate;
 import java.security.cert.CertificateException;
 import java.util.HashMap;
 import java.util.Map;
+import javax.net.ssl.X509KeyManager;
 
 import org.apache.commons.io.FileUtils;
 import org.junit.Assert;
@@ -31,12 +39,19 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import io.netty.handler.ssl.OpenSslClientContext;
+import io.netty.handler.ssl.OpenSslServerContext;
+import io.netty.handler.ssl.OpenSslSessionContext;
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.util.SelfSignedCertificate;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.EncryptionOptions;
 import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
 import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.io.util.File;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 
 public class SSLFactoryTest
 {
@@ -65,13 +80,17 @@
                             .withTrustStore("test/conf/cassandra_ssl_test.truststore")
                             .withTrustStorePassword("cassandra")
                             .withRequireClientAuth(false)
-                            .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA");
+                            .withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA")
+                            .withSslContextFactory(new ParameterizedClass(TestFileBasedSSLContextFactory.class.getName(),
+                                                                          new HashMap<>()));
     }
 
     private ServerEncryptionOptions addKeystoreOptions(ServerEncryptionOptions options)
     {
         return options.withKeyStore("test/conf/cassandra_ssl_test.keystore")
-                      .withKeyStorePassword("cassandra");
+                      .withKeyStorePassword("cassandra")
+                      .withOutboundKeystore("test/conf/cassandra_ssl_test_outbound.keystore")
+                      .withOutboundKeystorePassword("cassandra");
     }
 
     private ServerEncryptionOptions addPEMKeystoreOptions(ServerEncryptionOptions options)
@@ -81,6 +100,8 @@
         return options.withSslContextFactory(sslContextFactoryClass)
                       .withKeyStore("test/conf/cassandra_ssl_test.keystore.pem")
                       .withKeyStorePassword("cassandra")
+                      .withOutboundKeystore("test/conf/cassandra_ssl_test.keystore.pem")
+                      .withOutboundKeystorePassword("cassandra")
                       .withTrustStore("test/conf/cassandra_ssl_test.truststore.pem");
     }
 
@@ -117,7 +138,41 @@
     }
 
     @Test
-    public void testPEMSslContextReload_HappyPath() throws IOException, InterruptedException
+    public void testServerSocketShouldUseKeystore() throws IOException, CertificateException, KeyStoreException, NoSuchAlgorithmException, NoSuchFieldException, IllegalAccessException, ClassNotFoundException, NoSuchMethodException, InvocationTargetException
+    {
+        ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions)
+        .withOutboundKeystore("dummyKeystore")
+        .withOutboundKeystorePassword("dummyPassword");
+
+        // Server socket type should create a keystore with keystore & keystore password
+        final OpenSslServerContext context = (OpenSslServerContext) SSLFactory.createNettySslContext(options, true, ISslContextFactory.SocketType.SERVER);
+        assertNotNull(context);
+
+        // Verify if right certificate is loaded into SslContext
+        final Certificate loadedCertificate = getCertificateLoadedInSslContext(context.sessionContext());
+        final Certificate certificate = getCertificates("test/conf/cassandra_ssl_test.keystore", "cassandra");
+        assertEquals(loadedCertificate, certificate);
+    }
+
+    @Test
+    public void testClientSocketShouldUseOutboundKeystore() throws IOException, CertificateException, KeyStoreException, NoSuchAlgorithmException, NoSuchFieldException, ClassNotFoundException, InvocationTargetException, IllegalAccessException, NoSuchMethodException
+    {
+        ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions)
+        .withKeyStore("dummyKeystore")
+        .withKeyStorePassword("dummyPassword");
+
+        // Client socket type should create a keystore with outbound Keystore & outbound password
+        final OpenSslClientContext context = (OpenSslClientContext) SSLFactory.createNettySslContext(options, true, ISslContextFactory.SocketType.CLIENT);
+        assertNotNull(context);
+
+        // Verify if right certificate is loaded into SslContext
+        final Certificate loadedCertificate = getCertificateLoadedInSslContext(context.sessionContext());
+        final Certificate certificate = getCertificates("test/conf/cassandra_ssl_test_outbound.keystore", "cassandra");
+        assertEquals(loadedCertificate, certificate);
+    }
+
+    @Test
+    public void testPEMSslContextReload_HappyPath() throws IOException
     {
         try
         {
@@ -223,8 +278,7 @@
     @Test
     public void getSslContext_ParamChanges() throws IOException
     {
-        EncryptionOptions options = addKeystoreOptions(encryptionOptions)
-                                    .withEnabled(true)
+        ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions)
                                     .withCipherSuites("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256");
 
         SslContext ctx1 = SSLFactory.getOrCreateSslContext(options, true,
@@ -301,4 +355,36 @@
 
         Assert.assertNotEquals(cacheKey1, cacheKey2);
     }
+
+    public static class TestFileBasedSSLContextFactory extends FileBasedSslContextFactory {
+        public TestFileBasedSSLContextFactory(Map<String, Object> parameters)
+        {
+            super(parameters);
+        }
+    }
+
+    private static Certificate getCertificates(final String filename, final String password) throws KeyStoreException, IOException, CertificateException, NoSuchAlgorithmException
+    {
+        FileInputStream is = new FileInputStream(filename);
+        KeyStore keystore = KeyStore.getInstance(KeyStore.getDefaultType());
+        char[] passwd = password.toCharArray();
+        keystore.load(is, passwd);
+        return keystore.getCertificate("cassandra_ssl_test");
+    }
+
+    private static Certificate getCertificateLoadedInSslContext(final OpenSslSessionContext session)
+    throws ClassNotFoundException, InvocationTargetException, IllegalAccessException, NoSuchMethodException, NoSuchFieldException
+    {
+        Field providerField = OpenSslSessionContext.class.getDeclaredField("provider");
+        providerField.setAccessible(true);
+
+        Class<?> keyMaterialProvider = Class.forName("io.netty.handler.ssl.OpenSslKeyMaterialProvider");
+        Object provider = keyMaterialProvider.cast(providerField.get(session));
+
+        Method keyManager = provider.getClass().getDeclaredMethod("keyManager");
+        keyManager.setAccessible(true);
+        X509KeyManager keyManager1 = (X509KeyManager) keyManager.invoke(provider);
+        final Certificate[] certificates = keyManager1.getCertificateChain("cassandra_ssl_test");
+        return certificates[0];
+    }
 }
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
index 21ab7d9..a4a6ad2 100644
--- a/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotLoaderTest.java
@@ -27,6 +27,7 @@
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -123,9 +124,40 @@
                                                                  Paths.get(baseDir.toString(), DATA_DIR_3)));
         Set<TableSnapshot> snapshots = loader.loadSnapshots();
         assertThat(snapshots).hasSize(3);
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, null, null, tag1Files));
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, null, null, tag2Files));
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, null, null, tag3Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, null, null, tag1Files, false));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, null, null, tag2Files, false));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, null, null, tag3Files, false));
+    }
+
+    @Test
+    public void testEphemeralSnapshotWithoutManifest() throws IOException
+    {
+        Set<File> tag1Files = new HashSet<>();
+
+        // Create one snapshot per table - without manifests:
+        // - ks1.t1 : tag1
+        File baseDir  = new File(tmpDir.newFolder());
+        boolean ephemeralFileCreated = false;
+        for (String dataDir : DATA_DIRS)
+        {
+            File dir = createDir(baseDir, dataDir, KEYSPACE_1, tableDirName(TABLE1_NAME, TABLE1_ID), Directories.SNAPSHOT_SUBDIR, TAG1);
+            tag1Files.add(dir);
+            if (!ephemeralFileCreated)
+            {
+                createEphemeralMarkerFile(dir);
+                ephemeralFileCreated = true;
+            }
+        }
+
+        // Verify snapshot is found correctly from data directories
+        SnapshotLoader loader = new SnapshotLoader(Arrays.asList(Paths.get(baseDir.toString(), DATA_DIR_1),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_2),
+                                                                 Paths.get(baseDir.toString(), DATA_DIR_3)));
+
+        Set<TableSnapshot> snapshots = loader.loadSnapshots();
+        assertThat(snapshots).hasSize(1);
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, null, null, tag1Files, true));
+        Assert.assertTrue(snapshots.stream().findFirst().get().isEphemeral());
     }
 
     @Test
@@ -169,9 +201,9 @@
                                                                  Paths.get(baseDir.toString(), DATA_DIR_3)));
         Set<TableSnapshot> snapshots = loader.loadSnapshots();
         assertThat(snapshots).hasSize(3);
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, tag1Ts, null, tag1Files));
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, tag2Ts, tag2Ts.plusSeconds(tag2Ttl.toSeconds()), tag2Files));
-        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, tag3Ts, null, tag3Files));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE1_NAME, TABLE1_ID, TAG1, tag1Ts, null, tag1Files, false));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_1, TABLE2_NAME, TABLE2_ID,  TAG2, tag2Ts, tag2Ts.plusSeconds(tag2Ttl.toSeconds()), tag2Files, false));
+        assertThat(snapshots).contains(new TableSnapshot(KEYSPACE_2, TABLE3_NAME, TABLE3_ID,  TAG3, tag3Ts, null, tag3Files, false));
     }
 
     @Test
@@ -208,7 +240,7 @@
 
     private void writeManifest(File snapshotDir, Instant creationTime, DurationSpec.IntSecondsBound ttl) throws IOException
     {
-        SnapshotManifest manifest = new SnapshotManifest(Lists.newArrayList("f1", "f2", "f3"), ttl, creationTime);
+        SnapshotManifest manifest = new SnapshotManifest(Lists.newArrayList("f1", "f2", "f3"), ttl, creationTime, false);
         manifest.serializeToJsonFile(getManifestFile(snapshotDir));
     }
 
@@ -219,6 +251,11 @@
         return file;
     }
 
+    private static void createEphemeralMarkerFile(File dir)
+    {
+        Assert.assertTrue(new File(dir, "ephemeral.snapshot").createFileIfNotExists());
+    }
+
     static String tableDirName(String tableName, UUID tableId)
     {
         return String.format("%s-%s", tableName, removeDashes(tableId));
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
index 0c078d4..eeb3b63 100644
--- a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java
@@ -50,7 +50,7 @@
     @ClassRule
     public static TemporaryFolder temporaryFolder = new TemporaryFolder();
 
-    private TableSnapshot generateSnapshotDetails(String tag, Instant expiration) throws Exception {
+    private TableSnapshot generateSnapshotDetails(String tag, Instant expiration, boolean ephemeral) throws Exception {
         return new TableSnapshot(
         "ks",
         "tbl",
@@ -58,15 +58,16 @@
         tag,
         Instant.EPOCH,
         expiration,
-        createFolders(temporaryFolder)
+        createFolders(temporaryFolder),
+        ephemeral
         );
     }
 
     @Test
     public void testLoadSnapshots() throws Exception {
-        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH);
-        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusSeconds(ONE_DAY_SECS));
-        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null);
+        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH, false);
+        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusSeconds(ONE_DAY_SECS), false);
+        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null, false);
         List<TableSnapshot> snapshots = Arrays.asList(expired, nonExpired, nonExpiring);
 
         // Create SnapshotManager with 3 snapshots: expired, non-expired and non-expiring
@@ -84,9 +85,9 @@
         SnapshotManager manager = new SnapshotManager(3, 3);
 
         // Add 3 snapshots: expired, non-expired and non-expiring
-        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH);
-        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusMillis(ONE_DAY_SECS));
-        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null);
+        TableSnapshot expired = generateSnapshotDetails("expired", Instant.EPOCH, false);
+        TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusMillis(ONE_DAY_SECS), false);
+        TableSnapshot nonExpiring = generateSnapshotDetails("non-expiring", null, false);
         manager.addSnapshot(expired);
         manager.addSnapshot(nonExpired);
         manager.addSnapshot(nonExpiring);
@@ -118,8 +119,8 @@
 
             // Add 2 expiring snapshots: one to expire in 2 seconds, another in 1 day
             int TTL_SECS = 2;
-            TableSnapshot toExpire = generateSnapshotDetails("to-expire", now().plusSeconds(TTL_SECS));
-            TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusMillis(ONE_DAY_SECS));
+            TableSnapshot toExpire = generateSnapshotDetails("to-expire", now().plusSeconds(TTL_SECS), false);
+            TableSnapshot nonExpired = generateSnapshotDetails("non-expired", now().plusMillis(ONE_DAY_SECS), false);
             manager.addSnapshot(toExpire);
             manager.addSnapshot(nonExpired);
 
@@ -150,7 +151,7 @@
     {
         // Given
         SnapshotManager manager = new SnapshotManager(1, 3);
-        TableSnapshot expiringSnapshot = generateSnapshotDetails("snapshot", now().plusMillis(50000));
+        TableSnapshot expiringSnapshot = generateSnapshotDetails("snapshot", now().plusMillis(50000), false);
         manager.addSnapshot(expiringSnapshot);
         assertThat(manager.getExpiringSnapshots()).contains(expiringSnapshot);
         assertThat(expiringSnapshot.exists()).isTrue();
diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
index d3b11c0..5eac6a2 100644
--- a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
+++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java
@@ -108,7 +108,7 @@
 
     @Test
     public void testSerializeAndDeserialize() throws Exception {
-        SnapshotManifest manifest = new SnapshotManifest(Arrays.asList("db1", "db2", "db3"), new DurationSpec.IntSecondsBound("2m"), Instant.ofEpochMilli(currentTimeMillis()));
+        SnapshotManifest manifest = new SnapshotManifest(Arrays.asList("db1", "db2", "db3"), new DurationSpec.IntSecondsBound("2m"), Instant.ofEpochMilli(currentTimeMillis()), false);
         File manifestFile = new File(tempFolder.newFile("manifest.json"));
 
         manifest.serializeToJsonFile(manifestFile);
diff --git a/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
index 4bb1756..c1614df 100644
--- a/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
+++ b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java
@@ -74,7 +74,9 @@
         "some",
         null,
         null,
-        folders);
+        folders,
+        false
+        );
 
         assertThat(snapshot.exists()).isTrue();
 
@@ -95,7 +97,9 @@
         "some",
         null,
         null,
-        folders);
+        folders,
+        false
+        );
 
         assertThat(snapshot.isExpiring()).isFalse();
         assertThat(snapshot.isExpired(now())).isFalse();
@@ -107,7 +111,9 @@
         "some",
         now(),
         null,
-        folders);
+        folders,
+        false
+        );
 
         assertThat(snapshot.isExpiring()).isFalse();
         assertThat(snapshot.isExpired(now())).isFalse();
@@ -119,7 +125,9 @@
         "some",
         now(),
         now().plusSeconds(1000),
-        folders);
+        folders,
+        false
+        );
 
         assertThat(snapshot.isExpiring()).isTrue();
         assertThat(snapshot.isExpired(now())).isFalse();
@@ -131,7 +139,8 @@
         "some",
         now(),
         now().minusSeconds(1000),
-        folders);
+        folders,
+        false);
 
         assertThat(snapshot.isExpiring()).isTrue();
         assertThat(snapshot.isExpired(now())).isTrue();
@@ -159,7 +168,8 @@
         "some",
         null,
         null,
-        folders);
+        folders,
+        false);
 
         Long res = 0L;
 
@@ -185,7 +195,9 @@
         "some",
         null,
         null,
-        folders);
+        folders,
+        false
+        );
 
         Long res = 0L;
 
@@ -214,7 +226,10 @@
         "some1",
         createdAt,
         null,
-        folders);
+        folders,
+        false
+        );
+
         assertThat(withCreatedAt.getCreatedAt()).isEqualTo(createdAt);
 
         // When createdAt is  null, it should return the snapshot folder minimum update time
@@ -225,7 +240,10 @@
         "some1",
         null,
         null,
-        folders);
+        folders,
+        false
+        );
+
         assertThat(withoutCreatedAt.getCreatedAt()).isEqualTo(Instant.ofEpochMilli(folders.stream().mapToLong(f -> f.lastModified()).min().getAsLong()));
     }
 
diff --git a/test/unit/org/apache/cassandra/transport/SerDeserTest.java b/test/unit/org/apache/cassandra/transport/SerDeserTest.java
index da76070..75523e1 100644
--- a/test/unit/org/apache/cassandra/transport/SerDeserTest.java
+++ b/test/unit/org/apache/cassandra/transport/SerDeserTest.java
@@ -238,7 +238,7 @@
 
         ByteBuffer serialized = t.bindAndGet(options);
 
-        ByteBuffer[] fields = udt.split(serialized);
+        ByteBuffer[] fields = udt.split(ByteBufferAccessor.instance, serialized);
 
         assertEquals(4, fields.length);
 
diff --git a/test/unit/org/apache/cassandra/utils/SimpleGraph.java b/test/unit/org/apache/cassandra/utils/SimpleGraph.java
new file mode 100644
index 0000000..71b1fb2
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/SimpleGraph.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.function.Consumer;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Ordering;
+
+/**
+ * A directed graph. Main usage is the {@link #findPaths(Object, Object)} method which is used to find all paths between
+ * 2 vertices.
+ */
+public class SimpleGraph<V>
+{
+    private final ImmutableMap<V, ImmutableSet<V>> edges;
+
+    private SimpleGraph(ImmutableMap<V, ImmutableSet<V>> edges)
+    {
+        if (edges == null || edges.isEmpty())
+            throw new AssertionError("Edges empty");
+        this.edges = edges;
+    }
+
+    public static <T extends Comparable<T>> NavigableSet<T> sortedVertices(SimpleGraph<T> graph)
+    {
+        return new TreeSet<>(graph.vertices());
+    }
+
+    public static <T extends Comparable<T>> T min(SimpleGraph<T> graph)
+    {
+        return Ordering.natural().min(graph.vertices());
+    }
+
+    public static <T extends Comparable<T>> T max(SimpleGraph<T> graph)
+    {
+        return Ordering.natural().max(graph.vertices());
+    }
+
+    public boolean hasEdge(V a, V b)
+    {
+        ImmutableSet<V> matches = edges.get(a);
+        return matches != null && matches.contains(b);
+    }
+
+    public ImmutableSet<V> vertices()
+    {
+        ImmutableSet.Builder<V> b = ImmutableSet.builder();
+        b.addAll(edges.keySet());
+        edges.values().forEach(b::addAll);
+        return b.build();
+    }
+
+    public List<List<V>> findPaths(V from, V to)
+    {
+        List<List<V>> matches = new ArrayList<>();
+        findPaths0(Collections.singletonList(from), from, to, matches::add);
+        return matches;
+    }
+
+    private void findPaths0(List<V> accum, V from, V to, Consumer<List<V>> onMatch)
+    {
+        ImmutableSet<V> check = edges.get(from);
+        if (check == null)
+            return; // no matches
+        for (V next : check)
+        {
+            if (accum.contains(next))
+                return; // ignore walking recursive
+            List<V> nextAccum = new ArrayList<>(accum);
+            nextAccum.add(next);
+            if (next.equals(to))
+            {
+                onMatch.accept(nextAccum);
+            }
+            else
+            {
+                findPaths0(nextAccum, next, to, onMatch);
+            }
+        }
+    }
+
+    public static class Builder<V>
+    {
+        private final Map<V, Set<V>> edges = new HashMap<>();
+
+        public Builder<V> addEdge(V from, V to)
+        {
+            edges.computeIfAbsent(from, ignore -> new HashSet<>()).add(to);
+            return this;
+        }
+
+        public SimpleGraph<V> build()
+        {
+            ImmutableMap.Builder<V, ImmutableSet<V>> builder = ImmutableMap.builder();
+            for (Map.Entry<V, Set<V>> e : edges.entrySet())
+                builder.put(e.getKey(), ImmutableSet.copyOf(e.getValue()));
+            return new SimpleGraph(builder.build());
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/SimpleGraphTest.java b/test/unit/org/apache/cassandra/utils/SimpleGraphTest.java
new file mode 100644
index 0000000..6adee36
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/SimpleGraphTest.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.junit.Test;
+
+import org.assertj.core.api.Assertions;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SimpleGraphTest
+{
+    @Test
+    public void empty()
+    {
+        Assertions.assertThatThrownBy(() -> new SimpleGraph.Builder<String>().build())
+                  .isInstanceOf(AssertionError.class)
+                  .hasMessage("Edges empty");
+    }
+
+    /**
+     * If vertices have edges that form a circle this should not cause {@link SimpleGraph#findPaths(Object, Object)} to
+     * hang.
+     */
+    @Test
+    public void recursive()
+    {
+        SimpleGraph<String> graph = of("A", "B",
+                                       "B", "C",
+                                       "C", "A");
+        // no paths to identity
+        assertThat(graph.findPaths("A", "A")).isEmpty();
+        assertThat(graph.findPaths("B", "B")).isEmpty();
+        assertThat(graph.findPaths("C", "C")).isEmpty();
+
+        assertThat(graph.findPaths("C", "B")).isEqualTo(Collections.singletonList(Arrays.asList("C", "A", "B")));
+
+        // all options return and don't have duplicate keys
+        for (String i : graph.vertices())
+        {
+            for (String j : graph.vertices())
+            {
+                List<List<String>> paths = graph.findPaths(i, j);
+                for (List<String> path : paths)
+                {
+                    Map<String, Integer> distinct = countDistinct(path);
+                    for (Map.Entry<String, Integer> e : distinct.entrySet())
+                        assertThat(e.getValue()).describedAs("Duplicate vertex %s found; %s", e.getKey(), path).isEqualTo(1);
+                }
+            }
+        }
+    }
+
+    @Test
+    public void simple()
+    {
+        SimpleGraph<String> graph = of("A", "B",
+                                       "B", "C",
+                                       "C", "D");
+
+        assertThat(graph.findPaths("A", "B")).isEqualTo(Collections.singletonList(Arrays.asList("A", "B")));
+        assertThat(graph.findPaths("A", "C")).isEqualTo(Collections.singletonList(Arrays.asList("A", "B", "C")));
+        assertThat(graph.findPaths("B", "D")).isEqualTo(Collections.singletonList(Arrays.asList("B", "C", "D")));
+
+        assertThat(graph.hasEdge("A", "B")).isTrue();
+        assertThat(graph.hasEdge("C", "D")).isTrue();
+        assertThat(graph.hasEdge("B", "A")).isFalse();
+        assertThat(graph.hasEdge("C", "B")).isFalse();
+    }
+
+    private static <T> Map<T, Integer> countDistinct(List<T> list)
+    {
+        Map<T, Integer> map = new HashMap<>();
+        for (T t : list)
+            map.compute(t, (ignore, accum) -> accum == null ? 1 : accum + 1);
+        return map;
+    }
+
+    static <T> SimpleGraph<T> of(T... values)
+    {
+        assert values.length % 2 == 0: "graph requires even number of values, but given " + values.length;
+        SimpleGraph.Builder<T> builder = new SimpleGraph.Builder<>();
+        for (int i = 0; i < values.length; i = i + 2)
+            builder.addEdge(values[i], values[i + 1]);
+        return builder.build();
+    }
+}
\ No newline at end of file
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/AbstractTypeByteSourceTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/AbstractTypeByteSourceTest.java
new file mode 100644
index 0000000..d5e2f1e
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/AbstractTypeByteSourceTest.java
@@ -0,0 +1,1015 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.cassandra.cql3.Term;
+import org.apache.cassandra.db.marshal.*;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.cql3.Duration;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.LengthPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.RandomPartitioner;
+import org.apache.cassandra.serializers.MarshalException;
+import org.apache.cassandra.serializers.SimpleDateSerializer;
+import org.apache.cassandra.serializers.TypeSerializer;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.UUIDGen;
+
+@RunWith(Parameterized.class)
+public class AbstractTypeByteSourceTest
+{
+    private static final String ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()";
+
+    @Parameterized.Parameters(name = "version={0}")
+    public static Iterable<ByteComparable.Version> versions()
+    {
+        return ImmutableList.of(ByteComparable.Version.OSS42);
+    }
+
+    private final ByteComparable.Version version;
+
+    public AbstractTypeByteSourceTest(ByteComparable.Version version)
+    {
+        this.version = version;
+    }
+
+    private <T> void testValuesForType(AbstractType<T> type, T... values)
+    {
+        testValuesForType(type, Arrays.asList(values));
+    }
+
+    private <T> void testValuesForType(AbstractType<T> type, List<T> values)
+    {
+        for (T initial : values)
+            decodeAndAssertEquals(type, initial);
+        if (IntegerType.instance.equals(type))
+            // IntegerType tests go through A LOT of values, so short of randomly picking up to, let's say 1000
+            // values to combine with, we'd rather skip the comparison tests for them.
+            return;
+        for (int i = 0; i < values.size(); ++i)
+        {
+            for (int j = i + 1; j < values.size(); ++j)
+            {
+                ByteBuffer left = type.decompose(values.get(i));
+                ByteBuffer right = type.decompose(values.get(j));
+                int compareBuffers = Integer.signum(type.compare(left, right));
+                ByteSource leftSource = type.asComparableBytes(left.duplicate(), version);
+                ByteSource rightSource = type.asComparableBytes(right.duplicate(), version);
+                int compareBytes = Integer.signum(ByteComparable.compare(v -> leftSource, v -> rightSource, version));
+                Assert.assertEquals(compareBuffers, compareBytes);
+            }
+        }
+    }
+
+    private <T> void testValuesForType(AbstractType<T> type, Stream<T> values)
+    {
+        values.forEach(initial -> decodeAndAssertEquals(type, initial));
+    }
+
+    private <T> void decodeAndAssertEquals(AbstractType<T> type, T initial)
+    {
+        ByteBuffer initialBuffer = type.decompose(initial);
+        // Assert that fromComparableBytes decodes correctly.
+        ByteSource.Peekable peekableBytes = ByteSource.peekable(type.asComparableBytes(initialBuffer, version));
+        ByteBuffer decodedBuffer = type.fromComparableBytes(peekableBytes, version);
+        Assert.assertEquals("For " + ByteSourceComparisonTest.safeStr(initial),
+                            ByteBufferUtil.bytesToHex(initialBuffer),
+                            ByteBufferUtil.bytesToHex(decodedBuffer));
+        // Assert that the value composed from fromComparableBytes is the correct one.
+        peekableBytes = ByteSource.peekable(type.asComparableBytes(initialBuffer, version));
+        T decoded = type.compose(type.fromComparableBytes(peekableBytes, version));
+        Assert.assertEquals(initial, decoded);
+    }
+
+    private static String newRandomAlphanumeric(Random prng, int length)
+    {
+        StringBuilder random = new StringBuilder(length);
+        for (int i = 0; i < length; ++i)
+            random.append(ALPHABET.charAt(prng.nextInt(ALPHABET.length())));
+        return random.toString();
+    }
+
+    @Test
+    public void testAsciiType()
+    {
+        String[] asciiStrings = new String[]
+        {
+                "",
+                "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890",
+                "!@#$%^&*()",
+        };
+        testValuesForType(AsciiType.instance, asciiStrings);
+
+        Random prng = new Random();
+        Stream<String> asciiStream = Stream.generate(() -> newRandomAlphanumeric(prng, 10)).limit(1000);
+        testValuesForType(AsciiType.instance, asciiStream);
+    }
+
+    @Test
+    public void testBooleanType()
+    {
+        testValuesForType(BooleanType.instance, Boolean.TRUE, Boolean.FALSE, null);
+    }
+
+    @Test
+    public void testBytesType()
+    {
+        List<ByteBuffer> byteBuffers = new ArrayList<>();
+        Random prng = new Random();
+        byte[] byteArray;
+        int[] arrayLengths = new int[] {1, 10, 100, 1000};
+        for (int length : arrayLengths)
+        {
+            byteArray = new byte[length];
+            for (int i = 0; i < 1000; ++i)
+            {
+                prng.nextBytes(byteArray);
+                byteBuffers.add(ByteBuffer.wrap(byteArray));
+            }
+        }
+        testValuesForType(BytesType.instance, byteBuffers.toArray(new ByteBuffer[0]));
+    }
+
+    @Test
+    public void testByteType()
+    {
+        testValuesForType(ByteType.instance, new Byte[] { null });
+
+        Stream<Byte> allBytes = IntStream.range(Byte.MIN_VALUE, Byte.MAX_VALUE + 1)
+                                         .mapToObj(value -> (byte) value);
+        testValuesForType(ByteType.instance, allBytes);
+    }
+
+    @Test
+    public void testCompositeType()
+    {
+        CompositeType compType = CompositeType.getInstance(UTF8Type.instance, TimeUUIDType.instance, IntegerType.instance);
+        List<ByteBuffer> byteBuffers = new ArrayList<>();
+        Random prng = new Random();
+        // Test with complete CompositeType rows
+        for (int i = 0; i < 1000; ++i)
+        {
+            String randomString = newRandomAlphanumeric(prng, 10);
+            TimeUUID randomUuid = TimeUUID.Generator.nextTimeUUID();
+            BigInteger randomVarint = BigInteger.probablePrime(80, prng);
+            byteBuffers.add(compType.decompose(randomString, randomUuid, randomVarint));
+        }
+        // Test with incomplete CompositeType rows, where only the first element is present
+        ByteBuffer[] incompleteComposite = new ByteBuffer[1];
+        incompleteComposite[0] = UTF8Type.instance.decompose(newRandomAlphanumeric(prng, 10));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite));
+        // ...and the last end-of-component byte is not 0.
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite, (byte) 1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite, (byte) 1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite, (byte) -1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite, (byte) -1));
+        // Test with incomplete CompositeType rows, where only the last element is not present
+        incompleteComposite = new ByteBuffer[2];
+        incompleteComposite[0] = UTF8Type.instance.decompose(newRandomAlphanumeric(prng, 10));
+        incompleteComposite[1] = TimeUUIDType.instance.decompose(TimeUUID.Generator.nextTimeUUID());
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite));
+        // ...and the last end-of-component byte is not 0.
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite, (byte) 1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite, (byte) 1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, true, incompleteComposite, (byte) -1));
+        byteBuffers.add(CompositeType.build(ByteBufferAccessor.instance, false, incompleteComposite, (byte) -1));
+
+        testValuesForType(compType, byteBuffers.toArray(new ByteBuffer[0]));
+    }
+
+    @Test
+    public void testDateType()
+    {
+        Stream<Date> dates = Stream.of(null,
+                                       new Date(Long.MIN_VALUE),
+                                       new Date(Long.MAX_VALUE),
+                                       new Date());
+        testValuesForType(DateType.instance, dates);
+
+        dates = new Random().longs(1000).mapToObj(Date::new);
+        testValuesForType(DateType.instance, dates);
+    }
+
+    @Test
+    public void testDecimalType()
+    {
+        // We won't be using testValuesForType for DecimalType (i.e. we won't also be comparing the initial and decoded
+        // ByteBuffer values). That's because the same BigDecimal value can be represented with a couple of different,
+        // even if equivalent pairs of <mantissa, scale> (e.g. 0.1 is 1 * e-1, as well as 10 * e-2, as well as...).
+        // And in practice it's easier to just convert to BigDecimals and then compare, instead of trying to manually
+        // decode and convert to canonical representations, which then to compare. For example of generating canonical
+        // decimals in the first place, see testReversedType().
+        Consumer<BigDecimal> bigDecimalConsumer = initial ->
+        {
+            ByteSource byteSource = DecimalType.instance.asComparableBytes(DecimalType.instance.decompose(initial), version);
+            BigDecimal decoded = DecimalType.instance.compose(DecimalType.instance.fromComparableBytes(ByteSource.peekable(byteSource), version));
+            if (initial == null)
+                Assert.assertNull(decoded);
+            else
+                Assert.assertEquals(0, initial.compareTo(decoded));
+        };
+        // Test some interesting predefined BigDecimal values.
+        Stream.of(null,
+                  BigDecimal.ZERO,
+                  BigDecimal.ONE,
+                  BigDecimal.ONE.add(BigDecimal.ONE),
+                  BigDecimal.TEN,
+                  BigDecimal.valueOf(0.0000000000000000000000000000000001),
+                  BigDecimal.valueOf(-0.0000000000000000000000000000000001),
+                  BigDecimal.valueOf(0.0000000000000001234567891011121314),
+                  BigDecimal.valueOf(-0.0000000000000001234567891011121314),
+                  BigDecimal.valueOf(12345678910111213.141516171819202122),
+                  BigDecimal.valueOf(-12345678910111213.141516171819202122),
+                  new BigDecimal(BigInteger.TEN, Integer.MIN_VALUE),
+                  new BigDecimal(BigInteger.TEN.negate(), Integer.MIN_VALUE),
+                  new BigDecimal(BigInteger.TEN, Integer.MAX_VALUE),
+                  new BigDecimal(BigInteger.TEN.negate(), Integer.MAX_VALUE),
+                  new BigDecimal(BigInteger.TEN.pow(1000), Integer.MIN_VALUE),
+                  new BigDecimal(BigInteger.TEN.pow(1000).negate(), Integer.MIN_VALUE),
+                  new BigDecimal(BigInteger.TEN.pow(1000), Integer.MAX_VALUE),
+                  new BigDecimal(BigInteger.TEN.pow(1000).negate(), Integer.MAX_VALUE))
+              .forEach(bigDecimalConsumer);
+        // Test BigDecimals created from random double values with predefined range modifiers.
+        double[] bounds = {
+                Double.MIN_VALUE,
+                -1_000_000_000.0,
+                -100_000.0,
+                -1.0,
+                1.0,
+                100_000.0,
+                1_000_000_000.0,
+                Double.MAX_VALUE};
+        for (double bound : bounds)
+        {
+            new Random().doubles(1000)
+                        .mapToObj(initial -> BigDecimal.valueOf(initial * bound))
+                        .forEach(bigDecimalConsumer);
+        }
+    }
+
+    @Test
+    public void testDoubleType()
+    {
+        Stream<Double> doubles = Stream.of(null,
+                                           Double.NaN,
+                                           Double.POSITIVE_INFINITY,
+                                           Double.NEGATIVE_INFINITY,
+                                           Double.MAX_VALUE,
+                                           Double.MIN_VALUE,
+                                           +0.0,
+                                           -0.0,
+                                           +1.0,
+                                           -1.0,
+                                           +12345678910.111213141516,
+                                           -12345678910.111213141516);
+        testValuesForType(DoubleType.instance, doubles);
+
+        doubles = new Random().doubles(1000).boxed();
+        testValuesForType(DoubleType.instance, doubles);
+    }
+
+    @Test
+    public void testDurationType()
+    {
+        Random prng = new Random();
+        Stream<Duration> posDurations = Stream.generate(() ->
+                                                        {
+                                                            int months = prng.nextInt(12) + 1;
+                                                            int days = prng.nextInt(28) + 1;
+                                                            long nanos = (Math.abs(prng.nextLong() % 86_400_000_000_000L)) + 1;
+                                                            return Duration.newInstance(months, days, nanos);
+                                                        })
+                                              .limit(1000);
+        testValuesForType(DurationType.instance, posDurations);
+        Stream<Duration> negDurations = Stream.generate(() ->
+                                                        {
+                                                            int months = prng.nextInt(12) + 1;
+                                                            int days = prng.nextInt(28) + 1;
+                                                            long nanos = (Math.abs(prng.nextLong() % 86_400_000_000_000L)) + 1;
+                                                            return Duration.newInstance(-months, -days, -nanos);
+                                                        })
+                                              .limit(1000);
+        testValuesForType(DurationType.instance, negDurations);
+    }
+
+    @Test
+    public void testDynamicCompositeType()
+    {
+        DynamicCompositeType dynamicCompType = DynamicCompositeType.getInstance(new HashMap<>());
+        ImmutableList<String> allTypes = ImmutableList.of("org.apache.cassandra.db.marshal.BytesType",
+                                                          "org.apache.cassandra.db.marshal.TimeUUIDType",
+                                                          "org.apache.cassandra.db.marshal.IntegerType");
+        List<ByteBuffer> allValues = new ArrayList<>();
+        List<ByteBuffer> byteBuffers = new ArrayList<>();
+        Random prng = new Random();
+        for (int i = 0; i < 10; ++i)
+        {
+            String randomString = newRandomAlphanumeric(prng, 10);
+            allValues.add(ByteBufferUtil.bytes(randomString));
+            UUID randomUuid = TimeUUID.Generator.nextTimeAsUUID();
+            allValues.add(ByteBuffer.wrap(UUIDGen.decompose(randomUuid)));
+            byte randomByte = (byte) prng.nextInt();
+            allValues.add(ByteBuffer.allocate(1).put(randomByte));
+
+            // Three-component key with aliased and non-aliased types and end-of-component byte varying (0, 1, -1).
+            byteBuffers.add(DynamicCompositeType.build(allTypes, allValues));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, randomUuid, randomByte, (byte) 1));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, randomUuid, randomByte, (byte) -1));
+
+            // Two-component key with aliased and non-aliased types and end-of-component byte varying (0, 1, -1).
+            byteBuffers.add(DynamicCompositeType.build(allTypes.subList(0, 2), allValues.subList(0, 2)));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, randomUuid, -1, (byte) 1));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, randomUuid, -1, (byte) -1));
+
+            // One-component key with aliased and non-aliased type and end-of-component byte varying (0, 1, -1).
+            byteBuffers.add(DynamicCompositeType.build(allTypes.subList(0, 1), allValues.subList(0, 1)));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, null, -1, (byte) 1));
+            byteBuffers.add(createStringUuidVarintDynamicCompositeKey(randomString, null, -1, (byte) -1));
+
+            allValues.clear();
+        }
+        testValuesForType(dynamicCompType, byteBuffers.toArray(new ByteBuffer[0]));
+    }
+
+    // Similar to DynamicCompositeTypeTest.createDynamicCompositeKey(string, uuid, i, true, false), but not using any
+    // aliased types, in order to do an exact comparison of the unmarshalled DynamicCompositeType payload with the
+    // input one. If aliased types are used, due to DynamicCompositeType.build(List<String>, List<ByteBuffer>)
+    // always including the full type info in the newly constructed payload, an exact comparison won't work.
+    private static ByteBuffer createStringUuidVarintDynamicCompositeKey(String string, UUID uuid, int i, byte lastEocByte)
+    {
+        // 1. Calculate how many bytes do we need for a key of this DynamicCompositeType
+        String bytesType = "org.apache.cassandra.db.marshal.BytesType";
+        String timeUuidType = "org.apache.cassandra.db.marshal.TimeUUIDType";
+        String varintType = "org.apache.cassandra.db.marshal.IntegerType";
+        ByteBuffer bytes = ByteBufferUtil.bytes(string);
+        int totalSize = 0;
+        // Take into account the string component data (BytesType is aliased)
+        totalSize += 2 + bytesType.length() + 2 + bytes.remaining() + 1;
+        if (uuid != null)
+        {
+            // Take into account the UUID component data (TimeUUIDType is aliased)
+            totalSize += 2 + timeUuidType.length() + 2 + 16 + 1;
+            if (i != -1)
+            {
+                // Take into account the varint component data (IntegerType is _not_ aliased).
+                // Notice that we account for a single byte of varint data, so we'll downcast the int payload
+                // to byte and use only that as the actual varint payload.
+                totalSize += 2 + varintType.length() + 2 + 1 + 1;
+            }
+        }
+
+        // 2. Allocate a buffer with that many bytes
+        ByteBuffer bb = ByteBuffer.allocate(totalSize);
+
+        // 3. Write the key data for each component in the allocated buffer
+        bb.putShort((short) bytesType.length());
+        bb.put(ByteBufferUtil.bytes(bytesType));
+        bb.putShort((short) bytes.remaining());
+        bb.put(bytes);
+        // Make the end-of-component byte 1 if requested and the time-UUID component is null.
+        bb.put(uuid == null ? lastEocByte : (byte) 0);
+        if (uuid != null)
+        {
+            bb.putShort((short) timeUuidType.length());
+            bb.put(ByteBufferUtil.bytes(timeUuidType));
+            bb.putShort((short) 16);
+            bb.put(UUIDGen.decompose(uuid));
+            // Set the end-of-component byte if requested and the varint component is null.
+            bb.put(i == -1 ? lastEocByte : (byte) 0);
+            if (i != -1)
+            {
+                bb.putShort((short) varintType.length());
+                bb.put(ByteBufferUtil.bytes(varintType));
+                bb.putShort((short) 1);
+                bb.put((byte) i);
+                bb.put(lastEocByte);
+            }
+        }
+        bb.rewind();
+        return bb;
+    }
+
+    // Round-trips FloatType through its byte-comparable encoding for the special
+    // values (null, NaN, both infinities, extremes, signed zeros) and then for
+    // 1000 floats built from random bit patterns (covers denormals and NaN payloads).
+    @Test
+    public void testFloatType()
+    {
+        Stream<Float> floats = Stream.of(null,
+                                         Float.NaN,
+                                         Float.POSITIVE_INFINITY,
+                                         Float.NEGATIVE_INFINITY,
+                                         Float.MAX_VALUE,
+                                         Float.MIN_VALUE,
+                                         +0.0F,
+                                         -0.0F,
+                                         +1.0F,
+                                         -1.0F,
+                                         +123456.7891011F,
+                                         -123456.7891011F);
+        testValuesForType(FloatType.instance, floats);
+
+        // intBitsToFloat maps every random int to some float, including non-canonical NaNs.
+        floats = new Random().ints(1000).mapToObj(Float::intBitsToFloat);
+        testValuesForType(FloatType.instance, floats);
+    }
+
+    // Round-trips InetAddressType through its byte-comparable encoding for a fixed
+    // set of notable IPv4/IPv6 addresses (null, loopback, broadcast, all-zero and
+    // all-ones IPv6, etc.) and then for 1000 random IPv4/IPv6 address pairs.
+    @Test
+    public void testInetAddressType() throws UnknownHostException
+    {
+        Stream<InetAddress> inetAddresses = Stream.of(null,
+                                                      InetAddress.getLocalHost(),
+                                                      InetAddress.getLoopbackAddress(),
+                                                      InetAddress.getByName("0.0.0.0"),
+                                                      InetAddress.getByName("10.0.0.1"),
+                                                      InetAddress.getByName("172.16.1.1"),
+                                                      InetAddress.getByName("192.168.2.2"),
+                                                      InetAddress.getByName("224.3.3.3"),
+                                                      InetAddress.getByName("255.255.255.255"),
+                                                      InetAddress.getByName("0000:0000:0000:0000:0000:0000:0000:0000"),
+                                                      InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"),
+                                                      InetAddress.getByName("fe80:1:23:456:7890:1:23:456"));
+        testValuesForType(InetAddressType.instance, inetAddresses);
+
+        Random prng = new Random();
+        byte[] ipv4Bytes = new byte[4];
+        byte[] ipv6Bytes = new byte[16];
+        InetAddress[] addresses = new InetAddress[2000];
+        for (int i = 0; i < addresses.length / 2; ++i)
+        {
+            // Randomize both buffers each iteration (getByAddress copies the array,
+            // so reusing the buffers across iterations is safe). Previously only the
+            // IPv4 bytes were randomized, so every IPv6 entry was the all-zero address.
+            prng.nextBytes(ipv4Bytes);
+            addresses[2 * i] = InetAddress.getByAddress(ipv4Bytes);
+            prng.nextBytes(ipv6Bytes);
+            addresses[2 * i + 1] = InetAddress.getByAddress(ipv6Bytes);
+        }
+        testValuesForType(InetAddressType.instance, addresses);
+
+    }
+
+    // Round-trips Int32Type through its byte-comparable encoding for boundary values
+    // (null, MIN/MAX and their neighbours, byte-boundary values around +/-127/128/255/256)
+    // and for 1000 random ints.
+    @Test
+    public void testInt32Type()
+    {
+        Stream<Integer> ints = Stream.of(null,
+                                         Integer.MIN_VALUE,
+                                         Integer.MIN_VALUE + 1,
+                                         -256, -255, -128, -127, -1,
+                                         0,
+                                         1, 127, 128, 255, 256,
+                                         Integer.MAX_VALUE - 1,
+                                         Integer.MAX_VALUE);
+        testValuesForType(Int32Type.instance, ints);
+
+        ints = new Random().ints(1000).boxed();
+        testValuesForType(Int32Type.instance, ints);
+    }
+
+    // Round-trips IntegerType (varint) through its byte-comparable encoding for a
+    // dense small range, a handful of large values around Long.MAX_VALUE and beyond,
+    // and powers of two up to 2^9999 with their +/-1 neighbours (both signs) to
+    // exercise the variable-length boundaries of the encoding.
+    @Test
+    public void testIntegerType()
+    {
+        Stream<BigInteger> varints = IntStream.range(-1000000, 1000000).mapToObj(BigInteger::valueOf);
+        testValuesForType(IntegerType.instance, varints);
+
+        varints = Stream.of(null,
+                            BigInteger.valueOf(12345678910111213L),
+                            BigInteger.valueOf(12345678910111213L).negate(),
+                            BigInteger.valueOf(Long.MAX_VALUE),
+                            BigInteger.valueOf(Long.MAX_VALUE).negate(),
+                            BigInteger.valueOf(Long.MAX_VALUE - 1).multiply(BigInteger.valueOf(Long.MAX_VALUE - 1)),
+                            BigInteger.valueOf(Long.MAX_VALUE - 1).multiply(BigInteger.valueOf(Long.MAX_VALUE - 1)).negate());
+        testValuesForType(IntegerType.instance, varints);
+
+        List<BigInteger> varintList = new ArrayList<>();
+        for (int i = 0; i < 10000; ++i)
+        {
+            // 2^i, 2^i + 1, -(2^i + 1), 2^i - 1, -(2^i - 1)
+            BigInteger initial = BigInteger.ONE.shiftLeft(i);
+            varintList.add(initial);
+            BigInteger plusOne = initial.add(BigInteger.ONE);
+            varintList.add(plusOne);
+            varintList.add(plusOne.negate());
+            BigInteger minusOne = initial.subtract(BigInteger.ONE);
+            varintList.add(minusOne);
+            varintList.add(minusOne.negate());
+        }
+        testValuesForType(IntegerType.instance, varintList.toArray(new BigInteger[0]));
+    }
+
+    // Round-trips the three UUID types through their byte-comparable encodings using
+    // 1000 triples of (random v4 UUID, generated time-UUID, time-UUID with random
+    // timestamp/LSB); the final array slot (index 3000) is left as null on purpose.
+    // TimeUUIDType only accepts version-1 UUIDs, hence the filter before conversion.
+    @Test
+    public void testUuidTypes()
+    {
+        Random prng = new Random();
+        UUID[] testUuids = new UUID[3001];
+        for (int i = 0; i < testUuids.length / 3; ++i)
+        {
+            testUuids[3 * i] = UUID.randomUUID();
+            testUuids[3 * i + 1] = TimeUUID.Generator.nextTimeAsUUID();
+            testUuids[3 * i + 2] = TimeUUID.atUnixMicrosWithLsbAsUUID(prng.nextLong(), prng.nextLong());
+        }
+        testUuids[testUuids.length - 1] = null;
+        testValuesForType(UUIDType.instance, testUuids);
+        testValuesForType(LexicalUUIDType.instance, testUuids);
+        testValuesForType(TimeUUIDType.instance, Arrays.stream(testUuids)
+                                                       .filter(u -> u == null || u.version() == 1)
+                                                       .map(u -> u != null ? TimeUUID.fromUuid(u) : null));
+    }
+
+    // Builds numCollections collections, each populated with numElementsInCollection
+    // elements drawn from elementProducer. The collection kind (list, set, ...) is
+    // determined by collectionProducer; duplicates from elementProducer may collapse
+    // for set-like collections.
+    private static <E, C extends Collection<E>> List<C> newRandomElementCollections(Supplier<? extends C> collectionProducer,
+                                                                                    Supplier<? extends E> elementProducer,
+                                                                                    int numCollections,
+                                                                                    int numElementsInCollection)
+    {
+        List<C> result = new ArrayList<>();
+        for (int i = 0; i < numCollections; ++i)
+        {
+            C coll = collectionProducer.get();
+            for (int j = 0; j < numElementsInCollection; ++j)
+            {
+                coll.add(elementProducer.get());
+            }
+            result.add(coll);
+        }
+        return result;
+    }
+
+    // Round-trips ListType (both frozen and multi-cell variants, i.e. the
+    // isMultiCell=false/true instances) with elements whose serialized length is not
+    // fixed (strings) and with fixed-length elements (128-bit UUIDs).
+    @Test
+    public void testListType()
+    {
+        // Test lists with element components not having known/computable length (e.g. strings).
+        Random prng = new Random();
+        List<List<String>> stringLists = newRandomElementCollections(ArrayList::new,
+                                                                     () -> newRandomAlphanumeric(prng, 10),
+                                                                     100,
+                                                                     100);
+        testValuesForType(ListType.getInstance(UTF8Type.instance, false), stringLists);
+        testValuesForType(ListType.getInstance(UTF8Type.instance, true), stringLists);
+        // Test lists with element components with known/computable length (e.g. 128-bit UUIDs).
+        List<List<UUID>> uuidLists = newRandomElementCollections(ArrayList::new,
+                                                                 UUID::randomUUID,
+                                                                 100,
+                                                                 100);
+        testValuesForType(ListType.getInstance(UUIDType.instance, false), uuidLists);
+        testValuesForType(ListType.getInstance(UUIDType.instance, true), uuidLists);
+    }
+
+    // Round-trips LongType through its byte-comparable encoding for boundary values
+    // (null, Long/Integer limits and neighbours, byte-boundary values) and 1000
+    // random longs.
+    @Test
+    public void testLongType()
+    {
+        Stream<Long> longs = Stream.of(null,
+                                       Long.MIN_VALUE,
+                                       Long.MIN_VALUE + 1,
+                                       (long) Integer.MIN_VALUE - 1,
+                                       -256L, -255L, -128L, -127L, -1L,
+                                       0L,
+                                       1L, 127L, 128L, 255L, 256L,
+                                       (long) Integer.MAX_VALUE + 1,
+                                       Long.MAX_VALUE - 1,
+                                       Long.MAX_VALUE);
+        testValuesForType(LongType.instance, longs);
+
+        longs = new Random().longs(1000).boxed();
+        testValuesForType(LongType.instance, longs);
+    }
+
+    // Builds numMaps HashMaps, each populated with up to numEntries entries produced
+    // by keyProducer/valueProducer (duplicate keys overwrite, so a map may end up
+    // with fewer than numEntries entries).
+    private static <K, V> List<Map<K, V>> newRandomEntryMaps(Supplier<? extends K> keyProducer,
+                                                             Supplier<? extends V> valueProducer,
+                                                             int numMaps,
+                                                             int numEntries)
+    {
+        List<Map<K, V>> result = new ArrayList<>();
+        for (int i = 0; i < numMaps; ++i)
+        {
+            Map<K, V> map = new HashMap<>();
+            for (int j = 0; j < numEntries; ++j)
+            {
+                K key = keyProducer.get();
+                V value = valueProducer.get();
+                map.put(key, value);
+            }
+            result.add(map);
+        }
+        return result;
+    }
+
+    // Round-trips MapType (frozen and multi-cell) with both orientations of
+    // variable-length vs fixed-length components: string keys with UUID values, and
+    // UUID keys with string values.
+    @Test
+    public void testMapType()
+    {
+        Random prng = new Random();
+        List<Map<String, UUID>> stringToUuidMaps = newRandomEntryMaps(() -> newRandomAlphanumeric(prng, 10),
+                                                                      UUID::randomUUID,
+                                                                      100,
+                                                                      100);
+        testValuesForType(MapType.getInstance(UTF8Type.instance, UUIDType.instance, false), stringToUuidMaps);
+        testValuesForType(MapType.getInstance(UTF8Type.instance, UUIDType.instance, true), stringToUuidMaps);
+
+        List<Map<UUID, String>> uuidToStringMaps = newRandomEntryMaps(UUID::randomUUID,
+                                                                      () -> newRandomAlphanumeric(prng, 10),
+                                                                      100,
+                                                                      100);
+        testValuesForType(MapType.getInstance(UUIDType.instance, UTF8Type.instance, false), uuidToStringMaps);
+        testValuesForType(MapType.getInstance(UUIDType.instance, UTF8Type.instance, true), uuidToStringMaps);
+    }
+
+    // Verifies that each partitioner's PartitionerDefinedOrder round-trips arbitrary
+    // key buffers (empty buffer plus random strings, ints, doubles, varints and
+    // decimals) through asComparableBytes/fromComparableBytes, and that a buffer with
+    // no remaining bytes decodes back as the empty buffer.
+    @Test
+    public void testPartitionerDefinedOrder()
+    {
+        Random prng = new Random();
+        List<ByteBuffer> byteBuffers = new ArrayList<>();
+        byteBuffers.add(ByteBufferUtil.EMPTY_BYTE_BUFFER);
+        for (int i = 0; i < 1000; ++i)
+        {
+            String randomString = newRandomAlphanumeric(prng, 10);
+            byteBuffers.add(UTF8Type.instance.decompose(randomString));
+            int randomInt = prng.nextInt();
+            byteBuffers.add(Int32Type.instance.decompose(randomInt));
+            double randomDouble = prng.nextDouble();
+            byteBuffers.add(DoubleType.instance.decompose(randomDouble));
+            BigInteger randomishVarint = BigInteger.probablePrime(100, prng);
+            byteBuffers.add(IntegerType.instance.decompose(randomishVarint));
+            BigDecimal randomishDecimal = BigDecimal.valueOf(prng.nextLong(), prng.nextInt(100) - 50);
+            byteBuffers.add(DecimalType.instance.decompose(randomishDecimal));
+        }
+
+        // Consume all 100 bytes so the buffer has position == limit (nothing remaining).
+        byte[] bytes = new byte[100];
+        prng.nextBytes(bytes);
+        ByteBuffer exhausted = ByteBuffer.wrap(bytes);
+        ByteBufferUtil.readBytes(exhausted, 100);
+
+        List<IPartitioner> partitioners = Arrays.asList(
+                Murmur3Partitioner.instance,
+                RandomPartitioner.instance,
+                LengthPartitioner.instance
+                // NOTE LocalPartitioner, OrderPreservingPartitioner, and ByteOrderedPartitioner don't need a dedicated
+                // PartitionerDefinedOrder.
+                //   1) LocalPartitioner uses its inner AbstractType
+                //   2) OrderPreservingPartitioner uses UTF8Type
+                //   3) ByteOrderedPartitioner uses BytesType
+        );
+        for (IPartitioner partitioner : partitioners)
+        {
+            AbstractType<?> partitionOrdering = partitioner.partitionOrdering();
+            Assert.assertTrue(partitionOrdering instanceof PartitionerDefinedOrder);
+            for (ByteBuffer input : byteBuffers)
+            {
+                ByteSource byteSource = partitionOrdering.asComparableBytes(input, version);
+                ByteBuffer output = partitionOrdering.fromComparableBytes(ByteSource.peekable(byteSource), version);
+                // Compare as hex so a mismatch produces a readable assertion message.
+                Assert.assertEquals("For partitioner " + partitioner.getClass().getSimpleName(),
+                                    ByteBufferUtil.bytesToHex(input),
+                                    ByteBufferUtil.bytesToHex(output));
+            }
+            // An exhausted buffer (no remaining bytes) must decode to the empty buffer.
+            ByteSource byteSource = partitionOrdering.asComparableBytes(exhausted, version);
+            ByteBuffer output = partitionOrdering.fromComparableBytes(ByteSource.peekable(byteSource), version);
+            Assert.assertEquals(ByteBufferUtil.EMPTY_BYTE_BUFFER, output);
+        }
+    }
+
+    // Verifies ReversedType: first that a null peekable source decodes to the empty
+    // buffer (delegating to a base type that also tolerates null), then that random
+    // values of several important base types (UTF8, bytes, varint, decimal) round-trip
+    // through the reversed byte-comparable encoding at lengths 32/128/512.
+    @Test
+    public void testReversedType()
+    {
+        // Test how ReversedType handles null ByteSource.Peekable - here the choice of base type is important, as
+        // the base type should also be able to handle null ByteSource.Peekable.
+        ReversedType<BigInteger> reversedVarintType = ReversedType.getInstance(IntegerType.instance);
+        ByteBuffer decodedNull = reversedVarintType.fromComparableBytes(null, ByteComparable.Version.OSS42);
+        Assert.assertEquals(ByteBufferUtil.EMPTY_BYTE_BUFFER, decodedNull);
+
+        // Test how ReversedType handles random data with some common and important base types.
+        Map<AbstractType<?>, BiFunction<Random, Integer, ByteBuffer>> bufferGeneratorByType = new HashMap<>();
+        bufferGeneratorByType.put(UTF8Type.instance, (prng, length) -> UTF8Type.instance.decompose(newRandomAlphanumeric(prng, length)));
+        bufferGeneratorByType.put(BytesType.instance, (prng, length) ->
+        {
+            byte[] randomBytes = new byte[length];
+            prng.nextBytes(randomBytes);
+            return ByteBuffer.wrap(randomBytes);
+        });
+        bufferGeneratorByType.put(IntegerType.instance, (prng, length) ->
+        {
+            // Multiply random longs together until the varint is roughly `length` bytes wide.
+            BigInteger randomVarint = BigInteger.valueOf(prng.nextLong());
+            for (int i = 1; i < length / 8; ++i)
+                randomVarint = randomVarint.multiply(BigInteger.valueOf(prng.nextLong()));
+            return IntegerType.instance.decompose(randomVarint);
+        });
+        bufferGeneratorByType.put(DecimalType.instance, (prng, length) ->
+        {
+            BigInteger randomMantissa = BigInteger.valueOf(prng.nextLong());
+            for (int i = 1; i < length / 8; ++i)
+                randomMantissa = randomMantissa.multiply(BigInteger.valueOf(prng.nextLong()));
+            // Remove all trailing zeros from the mantissa and use an even scale, in order to have a "canonically
+            // represented" (in the context of DecimalType's encoding) decimal, i.e. one which wouldn't be re-scaled to
+            // conform with the "compacted mantissa between 0 and 1, scale as a power of 100" rule.
+            while (randomMantissa.remainder(BigInteger.TEN).equals(BigInteger.ZERO))
+                randomMantissa = randomMantissa.divide(BigInteger.TEN);
+            // `& -2` clears the lowest bit, forcing an even scale.
+            int randomScale = prng.nextInt() & -2;
+            BigDecimal randomDecimal = new BigDecimal(randomMantissa, randomScale);
+            return DecimalType.instance.decompose(randomDecimal);
+        });
+        Random prng = new Random();
+        for (Map.Entry<AbstractType<?>, BiFunction<Random, Integer, ByteBuffer>> entry : bufferGeneratorByType.entrySet())
+        {
+            ReversedType<?> reversedType = ReversedType.getInstance(entry.getKey());
+            for (int length = 32; length <= 512; length *= 4)
+            {
+                for (int i = 0; i < 100; ++i)
+                {
+                    ByteBuffer initial = entry.getValue().apply(prng, length);
+                    ByteSource.Peekable reversedPeekable = ByteSource.peekable(reversedType.asComparableBytes(initial, ByteComparable.Version.OSS42));
+                    ByteBuffer decoded = reversedType.fromComparableBytes(reversedPeekable, ByteComparable.Version.OSS42);
+                    Assert.assertEquals(initial, decoded);
+                }
+            }
+        }
+    }
+
+    // Round-trips SetType (frozen and multi-cell) with variable-length elements
+    // (strings) and fixed-length elements (128-bit UUIDs); mirrors testListType.
+    @Test
+    public void testSetType()
+    {
+        // Test sets with element components not having known/computable length (e.g. strings).
+        Random prng = new Random();
+        List<Set<String>> stringSets = newRandomElementCollections(HashSet::new,
+                                                                   () -> newRandomAlphanumeric(prng, 10),
+                                                                   100,
+                                                                   100);
+        testValuesForType(SetType.getInstance(UTF8Type.instance, false), stringSets);
+        testValuesForType(SetType.getInstance(UTF8Type.instance, true), stringSets);
+        // Test sets with element components with known/computable length (e.g. 128-bit UUIDs).
+        List<Set<UUID>> uuidSets = newRandomElementCollections(HashSet::new,
+                                                               UUID::randomUUID,
+                                                               100,
+                                                               100);
+        testValuesForType(SetType.getInstance(UUIDType.instance, false), uuidSets);
+        testValuesForType(SetType.getInstance(UUIDType.instance, true), uuidSets);
+    }
+
+    // Round-trips ShortType for null and, exhaustively, every one of the 65536
+    // possible short values.
+    @Test
+    public void testShortType()
+    {
+        testValuesForType(ShortType.instance, new Short[] { null });
+
+        Stream<Short> allShorts = IntStream.range(Short.MIN_VALUE, Short.MAX_VALUE + 1)
+                                           .mapToObj(value -> (short) value);
+        testValuesForType(ShortType.instance, allShorts);
+    }
+
+    // Round-trips SimpleDateType three ways: the null value, 1000 raw random ints,
+    // manually-built dates from random millis (comparing the day count), and
+    // manually-built dates parsed from notable date strings (comparing the string).
+    @Test
+    public void testSimpleDateType()
+    {
+        testValuesForType(SimpleDateType.instance, new Integer[] { null });
+
+        testValuesForType(SimpleDateType.instance, new Random().ints(1000).boxed());
+
+        // Test by manually creating and manually interpreting simple dates from random millis.
+        // NOTE(review): Math.abs(Integer.MIN_VALUE) is still negative; if the PRNG ever
+        // produces it the millis stay negative — confirm timeInMillisToDay tolerates that.
+        new Random().ints(1000).forEach(initialMillis ->
+                                         {
+                                             initialMillis = Math.abs(initialMillis);
+                                             Integer initialDays = SimpleDateSerializer.timeInMillisToDay(initialMillis);
+                                             ByteBuffer simpleDateBuffer = SimpleDateType.instance.fromTimeInMillis(initialMillis);
+                                             ByteSource byteSource = SimpleDateType.instance.asComparableBytes(simpleDateBuffer, version);
+                                             Integer decodedDays = SimpleDateType.instance.compose(SimpleDateType.instance.fromComparableBytes(ByteSource.peekable(byteSource), version));
+                                             Assert.assertEquals(initialDays, decodedDays);
+                                         });
+
+        // Test by manually creating and manually interpreting simple dates from strings.
+        String[] simpleDateStrings = new String[]
+                                             {
+                                                     "1970-01-01",
+                                                     "1970-01-02",
+                                                     "1969-12-31",
+                                                     "-0001-01-02",
+                                                     "-5877521-01-02",
+                                                     "2014-01-01",
+                                                     "+5881580-01-10",
+                                                     "1920-12-01",
+                                                     "1582-10-19"
+                                             };
+        for (String simpleDate : simpleDateStrings)
+        {
+            ByteBuffer simpleDataBuffer = SimpleDateType.instance.fromString(simpleDate);
+            ByteSource byteSource = SimpleDateType.instance.asComparableBytes(simpleDataBuffer, version);
+            Integer decodedDays = SimpleDateType.instance.compose(SimpleDateType.instance.fromComparableBytes(ByteSource.peekable(byteSource), version));
+            String decodedDate = SimpleDateSerializer.instance.toString(decodedDays);
+            Assert.assertEquals(simpleDate, decodedDate);
+        }
+    }
+
+    // Round-trips TimestampType for null, the current time, the epoch and its
+    // neighbour, the Long extremes, and 1000 random Dates.
+    @Test
+    public void testTimestampType()
+    {
+        Date[] dates = new Date[]
+                               {
+                                       null,
+                                       new Date(),
+                                       new Date(0L),
+                                       new Date(-1L),
+                                       new Date(Long.MAX_VALUE),
+                                       new Date(Long.MIN_VALUE)
+                               };
+        testValuesForType(TimestampType.instance, dates);
+        testValuesForType(TimestampType.instance, new Random().longs(1000).mapToObj(Date::new));
+    }
+
+    // Round-trips TimeType for null and 1000 random long values.
+    @Test
+    public void testTimeType()
+    {
+        testValuesForType(TimeType.instance, new Long[] { null });
+
+        testValuesForType(TimeType.instance, new Random().longs(1000).boxed());
+    }
+
+    // Round-trips a 4-component tuple (text, decimal, varint, blob) built from the
+    // full cartesian product of the candidate values below — including null decimal
+    // and varint components — through the tuple's byte-comparable encoding.
+    @Test
+    public void testTupleType()
+    {
+        TupleType tt = new TupleType(Arrays.asList(UTF8Type.instance,
+                                                   DecimalType.instance,
+                                                   IntegerType.instance,
+                                                   BytesType.instance));
+        Random prng = new Random();
+        List<ByteBuffer> tuplesData = new ArrayList<>();
+        String[] utf8Values = new String[]
+                                      {
+                                              "a",
+                                              "©",
+                                              newRandomAlphanumeric(prng, 10),
+                                              newRandomAlphanumeric(prng, 100)
+                                      };
+        BigDecimal[] decimalValues = new BigDecimal[]
+                                             {
+                                                     null,
+                                                     BigDecimal.ZERO,
+                                                     BigDecimal.ONE,
+                                                     BigDecimal.valueOf(1234567891011121314L, 50),
+                                                     BigDecimal.valueOf(1234567891011121314L, 50).negate()
+                                             };
+        BigInteger[] varintValues = new BigInteger[]
+                                            {
+                                                    null,
+                                                    BigInteger.ZERO,
+                                                    BigInteger.TEN.pow(1000),
+                                                    BigInteger.TEN.pow(1000).negate()
+                                            };
+        byte[] oneByte = new byte[1];
+        byte[] tenBytes = new byte[10];
+        byte[] hundredBytes = new byte[100];
+        byte[] thousandBytes = new byte[1000];
+        prng.nextBytes(oneByte);
+        prng.nextBytes(tenBytes);
+        prng.nextBytes(hundredBytes);
+        prng.nextBytes(thousandBytes);
+        byte[][] bytesValues = new byte[][]
+                                       {
+                                               new byte[0],
+                                               oneByte,
+                                               tenBytes,
+                                               hundredBytes,
+                                               thousandBytes
+                                       };
+        // 4 * 5 * 4 * 5 = 400 tuples, with null buffers for null decimal/varint components.
+        for (String utf8 : utf8Values)
+        {
+            for (BigDecimal decimal : decimalValues)
+            {
+                for (BigInteger varint : varintValues)
+                {
+                    for (byte[] bytes : bytesValues)
+                    {
+                        ByteBuffer tupleData = TupleType.buildValue(UTF8Type.instance.decompose(utf8),
+                                                                    decimal != null ? DecimalType.instance.decompose(decimal) : null,
+                                                                    varint != null ? IntegerType.instance.decompose(varint) : null,
+                                                                    // We could also use the wrapped bytes directly
+                                                                    BytesType.instance.decompose(ByteBuffer.wrap(bytes)));
+                        tuplesData.add(tupleData);
+                    }
+                }
+            }
+        }
+        testValuesForType(tt, tuplesData.toArray(new ByteBuffer[0]));
+    }
+
+    // Round-trips UTF8Type for 1000 random 100-character alphanumeric strings.
+    @Test
+    public void testUtf8Type()
+    {
+        Random prng = new Random();
+        testValuesForType(UTF8Type.instance, Stream.generate(() -> newRandomAlphanumeric(prng, 100)).limit(1000));
+    }
+
+    // Round-trips a minimal AbstractType that uses BYTE_ORDER comparison (see the
+    // nested ByteOrderedType below) with random buffers of several sizes, to cover
+    // the default asComparableBytes path used by byte-order-comparable types.
+    @Test
+    public void testTypeWithByteOrderedComparison()
+    {
+        Random prng = new Random();
+        byte[] singleByte = new byte[] { (byte) prng.nextInt() };
+        byte[] tenBytes = new byte[10];
+        prng.nextBytes(tenBytes);
+        byte[] hundredBytes = new byte[100];
+        prng.nextBytes(hundredBytes);
+        byte[] thousandBytes = new byte[1000];
+        prng.nextBytes(thousandBytes);
+        // No null here, as the default asComparableBytes(ByteBuffer, Version) implementation (and more specifically
+        // the ByteSource.of(ByteBuffer, Version) encoding) would throw then.
+        testValuesForType(ByteOrderedType.instance, Stream.of(ByteBufferUtil.EMPTY_BYTE_BUFFER,
+                                                              ByteBuffer.wrap(singleByte),
+                                                              ByteBuffer.wrap(tenBytes),
+                                                              ByteBuffer.wrap(hundredBytes),
+                                                              ByteBuffer.wrap(thousandBytes)));
+    }
+
+    // Minimal test-only AbstractType whose comparison is plain byte order, used by
+    // testTypeWithByteOrderedComparison. The fromString/fromJSONObject methods are
+    // unused by the test and intentionally return null.
+    private static class ByteOrderedType extends AbstractType<ByteBuffer>
+    {
+        public static final ByteOrderedType instance = new ByteOrderedType();
+
+        private ByteOrderedType()
+        {
+            super(ComparisonType.BYTE_ORDER);
+        }
+
+        @Override
+        public ByteBuffer fromString(String source) throws MarshalException
+        {
+            // Not needed for this test.
+            return null;
+        }
+
+        @Override
+        public Term fromJSONObject(Object parsed) throws MarshalException
+        {
+            // Not needed for this test.
+            return null;
+        }
+
+        @Override
+        public TypeSerializer<ByteBuffer> getSerializer()
+        {
+            return ByteOrderedSerializer.instance;
+        }
+
+        // Pass-through serializer: values are already raw byte buffers.
+        static class ByteOrderedSerializer extends TypeSerializer<ByteBuffer>
+        {
+
+            static final ByteOrderedSerializer instance = new ByteOrderedSerializer();
+
+            @Override
+            public ByteBuffer serialize(ByteBuffer value)
+            {
+                // Duplicate so callers can't disturb the original buffer's position/limit.
+                return value != null ? value.duplicate() : null;
+            }
+
+            @Override
+            public <V> ByteBuffer deserialize(V bytes, ValueAccessor<V> accessor)
+            {
+                return accessor.toBuffer(bytes);
+            }
+
+            @Override
+            public <V> void validate(V bytes, ValueAccessor<V> accessor) throws MarshalException
+            {
+                // Any byte sequence is valid for this type.
+            }
+
+            @Override
+            public String toString(ByteBuffer value)
+            {
+                return ByteBufferUtil.bytesToHex(value);
+            }
+
+            @Override
+            public Class<ByteBuffer> getType()
+            {
+                return ByteBuffer.class;
+            }
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceComparisonTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceComparisonTest.java
new file mode 100644
index 0000000..f5cf2b6
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceComparisonTest.java
@@ -0,0 +1,1178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.math.BigDecimal;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Ordering;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.Util;
+import org.apache.cassandra.db.Clustering;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ClusteringPrefix;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.dht.ByteOrderedPartitioner;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.RandomPartitioner;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.MurmurHash;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests forward conversion to ByteSource/ByteComparable and that the result compares correctly.
+ */
+public class ByteSourceComparisonTest extends ByteSourceTestBase
+{
+    private final static Logger logger = LoggerFactory.getLogger(ByteSourceComparisonTest.class);
+
+    @Rule
+    public final ExpectedException expectedException = ExpectedException.none();
+
+    @Test
+    public void testStringsAscii()
+    {
+        testType(AsciiType.instance, testStrings);
+    }
+
+    @Test
+    public void testStringsUTF8()
+    {
+        testType(UTF8Type.instance, testStrings);
+        testDirect(x -> ByteSource.of(x, Version.OSS42), Ordering.<String>natural()::compare, testStrings);
+    }
+
+    @Test
+    public void testBooleans()
+    {
+        testType(BooleanType.instance, testBools);
+    }
+
+    @Test
+    public void testInts()
+    {
+        testType(Int32Type.instance, testInts);
+        testDirect(x -> ByteSource.of(x), Integer::compare, testInts);
+    }
+
+    @Test
+    public void randomTestInts()
+    {
+        Random rand = new Random();
+        for (int i=0; i<10000; ++i)
+        {
+            int i1 = rand.nextInt();
+            int i2 = rand.nextInt();
+            assertComparesSame(Int32Type.instance, i1, i2);
+        }
+
+    }
+
+    @Test
+    public void testLongs()
+    {
+        testType(LongType.instance, testLongs);
+        testDirect(x -> ByteSource.of(x), Long::compare, testLongs);
+    }
+
+    @Test
+    public void testShorts()
+    {
+        testType(ShortType.instance, testShorts);
+    }
+
+    @Test
+    public void testBytes()
+    {
+        testType(ByteType.instance, testBytes);
+    }
+
+    @Test
+    public void testDoubles()
+    {
+        testType(DoubleType.instance, testDoubles);
+    }
+
+    @Test
+    public void testFloats()
+    {
+        testType(FloatType.instance, testFloats);
+    }
+
+    @Test
+    public void testBigInts()
+    {
+        testType(IntegerType.instance, testBigInts);
+    }
+
+    @Test
+    public void testBigDecimals()
+    {
+        testType(DecimalType.instance, testBigDecimals);
+    }
+
+    @Test
+    public void testBigDecimalInCombination()
+    {
+        BigDecimal b1 = new BigDecimal("123456.78901201");
+        BigDecimal b2 = new BigDecimal("123456.789012");
+        Boolean b = false;
+
+        assertClusteringPairComparesSame(DecimalType.instance, BooleanType.instance, b1, b, b2, b);
+        assertClusteringPairComparesSame(BooleanType.instance, DecimalType.instance, b, b1, b, b2);
+
+        b1 = b1.negate();
+        b2 = b2.negate();
+
+        assertClusteringPairComparesSame(DecimalType.instance, BooleanType.instance, b1, b, b2, b);
+        assertClusteringPairComparesSame(BooleanType.instance, DecimalType.instance, b, b1, b, b2);
+
+        b1 = new BigDecimal("-123456.78901289");
+        b2 = new BigDecimal("-123456.789012");
+
+        assertClusteringPairComparesSame(DecimalType.instance, BooleanType.instance, b1, b, b2, b);
+        assertClusteringPairComparesSame(BooleanType.instance, DecimalType.instance, b, b1, b, b2);
+
+        b1 = new BigDecimal("1");
+        b2 = new BigDecimal("1.1");
+
+        assertClusteringPairComparesSame(DecimalType.instance, BooleanType.instance, b1, b, b2, b);
+        assertClusteringPairComparesSame(BooleanType.instance, DecimalType.instance, b, b1, b, b2);
+
+        b1 = b1.negate();
+        b2 = b2.negate();
+
+        assertClusteringPairComparesSame(DecimalType.instance, BooleanType.instance, b1, b, b2, b);
+        assertClusteringPairComparesSame(BooleanType.instance, DecimalType.instance, b, b1, b, b2);
+    }
+
+    @Test
+    public void testUUIDs()
+    {
+        testType(UUIDType.instance, testUUIDs);
+    }
+
+    @Test
+    public void testTimeUUIDs()
+    {
+        testType(TimeUUIDType.instance, Arrays.stream(testUUIDs)
+                                              .filter(x -> x == null || x.version() == 1)
+                                              .map(x -> x != null ? TimeUUID.fromUuid(x) : null)
+                                              .toArray());
+    }
+
+    @Test
+    public void testLexicalUUIDs()
+    {
+        testType(LexicalUUIDType.instance, testUUIDs);
+    }
+
+    @Test
+    public void testSimpleDate()
+    {
+        testType(SimpleDateType.instance, Arrays.stream(testInts).filter(x -> x != null).toArray());
+    }
+
+    @Test
+    public void testTimeType()
+    {
+        testType(TimeType.instance, Arrays.stream(testLongs).filter(x -> x != null && x >= 0 && x <= 24L * 60 * 60 * 1000 * 1000 * 1000).toArray());
+    }
+
+    @SuppressWarnings("deprecation")
+    @Test
+    public void testDateType()
+    {
+        testType(DateType.instance, testDates);
+    }
+
+    @Test
+    public void testTimestampType()
+    {
+        testType(TimestampType.instance, testDates);
+    }
+
+    @Test
+    public void testBytesType()
+    {
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testType(BytesType.instance, values.toArray());
+    }
+
+    @Test
+    public void testInetAddressType() throws UnknownHostException
+    {
+        testType(InetAddressType.instance, testInets);
+    }
+
+    @Test
+    public void testEmptyType()
+    {
+        testType(EmptyType.instance, new Void[] { null });
+    }
+
+    @Test
+    public void testPatitionerDefinedOrder()
+    {
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testBuffers(new PartitionerDefinedOrder(Murmur3Partitioner.instance), values);
+        testBuffers(new PartitionerDefinedOrder(RandomPartitioner.instance), values);
+        testBuffers(new PartitionerDefinedOrder(ByteOrderedPartitioner.instance), values);
+    }
+
+    @Test
+    public void testPatitionerOrder()
+    {
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testDecoratedKeys(Murmur3Partitioner.instance, values);
+        testDecoratedKeys(RandomPartitioner.instance, values);
+        testDecoratedKeys(ByteOrderedPartitioner.instance, values);
+    }
+
+    @Test
+    public void testLocalPatitionerOrder()
+    {
+        for (int i = 0; i < testValues.length; ++i)
+        {
+            final AbstractType testType = testTypes[i];
+            testDecoratedKeys(new LocalPartitioner(testType), Lists.transform(Arrays.asList(testValues[i]),
+                                                                                            v -> testType.decompose(v)));
+        }
+    }
+
+    interface PairTester
+    {
+        void test(AbstractType t1, AbstractType t2, Object o1, Object o2, Object o3, Object o4);
+    }
+
+    void testCombinationSampling(Random rand, PairTester tester)
+    {
+        for (int i=0;i<testTypes.length;++i)
+            for (int j=0;j<testTypes.length;++j)
+            {
+                Object[] tv1 = new Object[3];
+                Object[] tv2 = new Object[3];
+                for (int t=0; t<tv1.length; ++t)
+                {
+                    tv1[t] = testValues[i][rand.nextInt(testValues[i].length)];
+                    tv2[t] = testValues[j][rand.nextInt(testValues[j].length)];
+                }
+
+                for (Object o1 : tv1)
+                    for (Object o2 : tv2)
+                        for (Object o3 : tv1)
+                            for (Object o4 : tv2)
+
+                {
+                    tester.test(testTypes[i], testTypes[j], o1, o2, o3, o4);
+                }
+            }
+    }
+
+    @Test
+    public void testCombinations()
+    {
+        Random rand = new Random(0);
+        testCombinationSampling(rand, this::assertClusteringPairComparesSame);
+    }
+
+    @Test
+    public void testNullsInClustering()
+    {
+        ByteBuffer[][] inputs = new ByteBuffer[][]
+                                {
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  null},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  null},
+                                new ByteBuffer[] {null,
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {null,
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {null,
+                                                  null}
+                                };
+        for (ByteBuffer[] input1 : inputs)
+            for (ByteBuffer[] input2 : inputs)
+            {
+                assertClusteringPairComparesSame(UTF8Type.instance, Int32Type.instance,
+                                                 input1[0], input1[1], input2[0], input2[1],
+                                                 (t, v) -> (ByteBuffer) v,
+                                                 input1[0] != null && input1[1] != null && input2[0] != null && input2[1] != null);
+            }
+    }
+
+    @Test
+    public void testNullsInClusteringLegacy()
+    {
+        // verify the legacy encoding treats null clustering the same as null value
+        ClusteringPrefix<ByteBuffer> aNull = makeBound(ClusteringPrefix.Kind.CLUSTERING,
+                                                       decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                       decomposeAndRandomPad(Int32Type.instance, null));
+        ClusteringPrefix<ByteBuffer> aEmpty = makeBound(ClusteringPrefix.Kind.CLUSTERING,
+                                                        decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                        null);
+        ClusteringComparator comp = new ClusteringComparator(UTF8Type.instance, Int32Type.instance);
+        assertEquals(0, ByteComparable.compare(comp.asByteComparable(aNull), comp.asByteComparable(aEmpty), Version.LEGACY));
+        ClusteringComparator compReversed = new ClusteringComparator(UTF8Type.instance, ReversedType.getInstance(Int32Type.instance));
+        assertEquals(0, ByteComparable.compare(compReversed.asByteComparable(aNull), compReversed.asByteComparable(aEmpty), Version.LEGACY));
+    }
+
+    @Test
+    public void testEmptyClustering()
+    {
+        assertEmptyComparedToStatic(1, ClusteringPrefix.Kind.CLUSTERING, Version.OSS42);
+        assertEmptyComparedToStatic(0, ClusteringPrefix.Kind.STATIC_CLUSTERING, Version.OSS42);
+        assertEmptyComparedToStatic(1, ClusteringPrefix.Kind.INCL_START_BOUND, Version.OSS42);
+        assertEmptyComparedToStatic(1, ClusteringPrefix.Kind.INCL_END_BOUND, Version.OSS42);
+
+        assertEmptyComparedToStatic(1, ClusteringPrefix.Kind.CLUSTERING, Version.LEGACY);
+        assertEmptyComparedToStatic(0, ClusteringPrefix.Kind.STATIC_CLUSTERING, Version.LEGACY);
+        assertEmptyComparedToStatic(-1, ClusteringPrefix.Kind.INCL_START_BOUND, Version.LEGACY);
+        assertEmptyComparedToStatic(1, ClusteringPrefix.Kind.INCL_END_BOUND, Version.LEGACY);
+    }
+
+    private void assertEmptyComparedToStatic(int expected, ClusteringPrefix.Kind kind, Version version)
+    {
+        ClusteringPrefix<ByteBuffer> empty = makeBound(kind);
+        ClusteringComparator compEmpty = new ClusteringComparator();
+        assertEquals(expected, Integer.signum(ByteComparable.compare(compEmpty.asByteComparable(empty),
+                                                                     compEmpty.asByteComparable(Clustering.STATIC_CLUSTERING),
+                                                                     version)));
+    }
+
+    void assertClusteringPairComparesSame(AbstractType<?> t1, AbstractType<?> t2, Object o1, Object o2, Object o3, Object o4)
+    {
+        assertClusteringPairComparesSame(t1, t2, o1, o2, o3, o4, AbstractType::decompose, true);
+    }
+
+    void assertClusteringPairComparesSame(AbstractType<?> t1, AbstractType<?> t2,
+                                          Object o1, Object o2, Object o3, Object o4,
+                                          BiFunction<AbstractType, Object, ByteBuffer> decompose,
+                                          boolean testLegacy)
+    {
+        for (Version v : Version.values())
+            for (ClusteringPrefix.Kind k1 : ClusteringPrefix.Kind.values())
+                for (ClusteringPrefix.Kind k2 : ClusteringPrefix.Kind.values())
+                {
+                    if (!testLegacy && v == Version.LEGACY)
+                        continue;
+
+                    ClusteringComparator comp = new ClusteringComparator(t1, t2);
+                    ByteBuffer[] b = new ByteBuffer[2];
+                    ByteBuffer[] d = new ByteBuffer[2];
+                    b[0] = decompose.apply(t1, o1);
+                    b[1] = decompose.apply(t2, o2);
+                    d[0] = decompose.apply(t1, o3);
+                    d[1] = decompose.apply(t2, o4);
+                    ClusteringPrefix<ByteBuffer> c = makeBound(k1, b);
+                    ClusteringPrefix<ByteBuffer> e = makeBound(k2, d);
+                    final ByteComparable bsc = comp.asByteComparable(c);
+                    final ByteComparable bse = comp.asByteComparable(e);
+                    int expected = Integer.signum(comp.compare(c, e));
+                    assertEquals(String.format("Failed comparing %s and %s, %s vs %s version %s",
+                                               safeStr(c.clusteringString(comp.subtypes())),
+                                               safeStr(e.clusteringString(comp.subtypes())), bsc, bse, v),
+                                 expected, Integer.signum(ByteComparable.compare(bsc, bse, v)));
+                    maybeCheck41Properties(expected, bsc, bse, v);
+                    maybeAssertNotPrefix(bsc, bse, v);
+
+                    ClusteringComparator compR = new ClusteringComparator(ReversedType.getInstance(t1), ReversedType.getInstance(t2));
+                    final ByteComparable bsrc = compR.asByteComparable(c);
+                    final ByteComparable bsre = compR.asByteComparable(e);
+                    int expectedR = Integer.signum(compR.compare(c, e));
+                    assertEquals(String.format("Failed comparing reversed %s and %s, %s vs %s version %s",
+                                               safeStr(c.clusteringString(comp.subtypes())),
+                                               safeStr(e.clusteringString(comp.subtypes())), bsrc, bsre, v),
+                                 expectedR, Integer.signum(ByteComparable.compare(bsrc, bsre, v)));
+                    maybeCheck41Properties(expectedR, bsrc, bsre, v);
+                    maybeAssertNotPrefix(bsrc, bsre, v);
+                }
+    }
+
+    static ClusteringPrefix<ByteBuffer> makeBound(ClusteringPrefix.Kind k1, ByteBuffer... b)
+    {
+        return makeBound(ByteBufferAccessor.instance.factory(), k1, b);
+    }
+
+    static <T> ClusteringPrefix<T> makeBound(ValueAccessor.ObjectFactory<T> factory, ClusteringPrefix.Kind k1, T[] b)
+    {
+        switch (k1)
+        {
+        case INCL_END_EXCL_START_BOUNDARY:
+        case EXCL_END_INCL_START_BOUNDARY:
+            return factory.boundary(k1, b);
+
+        case INCL_END_BOUND:
+        case EXCL_END_BOUND:
+        case INCL_START_BOUND:
+        case EXCL_START_BOUND:
+            return factory.bound(k1, b);
+
+        case CLUSTERING:
+            return factory.clustering(b);
+
+        case STATIC_CLUSTERING:
+            return factory.staticClustering();
+
+        default:
+            throw new AssertionError();
+        }
+    }
+
+    @Test
+    public void testTupleType()
+    {
+        Random rand = ThreadLocalRandom.current();
+        testCombinationSampling(rand, this::assertTupleComparesSame);
+    }
+
+    @Test
+    public void testTupleTypeNonFull()
+    {
+        TupleType tt = new TupleType(ImmutableList.of(UTF8Type.instance, Int32Type.instance));
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 decomposeAndRandomPad(UTF8Type.instance, ""),
+                                 decomposeAndRandomPad(Int32Type.instance, 0)),
+            // Note: a decomposed null (e.g. decomposeAndRandomPad(Int32Type.instance, null)) should not reach a tuple
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 decomposeAndRandomPad(UTF8Type.instance, ""),
+                                 null),
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 null,
+                                 decomposeAndRandomPad(Int32Type.instance, 0)),
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 decomposeAndRandomPad(UTF8Type.instance, "")),
+            TupleType.buildValue(ByteBufferAccessor.instance, (ByteBuffer) null),
+            TupleType.buildValue(ByteBufferAccessor.instance)
+            );
+        testBuffers(tt, tests);
+    }
+
+    @Test
+    public void testTupleNewField()
+    {
+        TupleType t1 = new TupleType(ImmutableList.of(UTF8Type.instance));
+        TupleType t2 = new TupleType(ImmutableList.of(UTF8Type.instance, Int32Type.instance));
+
+        ByteBuffer vOne = TupleType.buildValue(ByteBufferAccessor.instance,
+                                               decomposeAndRandomPad(UTF8Type.instance, "str"));
+        ByteBuffer vOneAndNull = TupleType.buildValue(ByteBufferAccessor.instance,
+                                                      decomposeAndRandomPad(UTF8Type.instance, "str"),
+                                                      null);
+
+        ByteComparable bOne1 = typeToComparable(t1, vOne);
+        ByteComparable bOne2 = typeToComparable(t2, vOne);
+        ByteComparable bOneAndNull2 = typeToComparable(t2, vOneAndNull);
+
+        assertEquals("The byte-comparable version of a one-field tuple must be the same as a two-field tuple with non-present second component.",
+                     bOne1.byteComparableAsString(Version.OSS42),
+                     bOne2.byteComparableAsString(Version.OSS42));
+        assertEquals("The byte-comparable version of a one-field tuple must be the same as a two-field tuple with null as second component.",
+                     bOne1.byteComparableAsString(Version.OSS42),
+                     bOneAndNull2.byteComparableAsString(Version.OSS42));
+    }
+
+
+    void assertTupleComparesSame(AbstractType t1, AbstractType t2, Object o1, Object o2, Object o3, Object o4)
+    {
+        TupleType tt = new TupleType(ImmutableList.of(t1, t2));
+        ByteBuffer b1 = TupleType.buildValue(ByteBufferAccessor.instance,
+                                             decomposeForTuple(t1, o1),
+                                             decomposeForTuple(t2, o2));
+        ByteBuffer b2 = TupleType.buildValue(ByteBufferAccessor.instance,
+                                             decomposeForTuple(t1, o3),
+                                             decomposeForTuple(t2, o4));
+        assertComparesSameBuffers(tt, b1, b2);
+    }
+
+    static <T> ByteBuffer decomposeForTuple(AbstractType<T> t, T o)
+    {
+        return o != null ? t.decompose(o) : null;
+    }
+
+    @Test
+    public void testCompositeType()
+    {
+        Random rand = new Random(0);
+        testCombinationSampling(rand, this::assertCompositeComparesSame);
+    }
+
+    @Test
+    public void testCompositeTypeNonFull()
+    {
+        CompositeType tt = CompositeType.getInstance(UTF8Type.instance, Int32Type.instance);
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, ""), decomposeAndRandomPad(Int32Type.instance, 0)),
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, ""), decomposeAndRandomPad(Int32Type.instance, null)),
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, "")),
+            CompositeType.build(ByteBufferAccessor.instance),
+            CompositeType.build(ByteBufferAccessor.instance, true, decomposeAndRandomPad(UTF8Type.instance, "")),
+            CompositeType.build(ByteBufferAccessor.instance,true)
+            );
+        for (ByteBuffer b : tests)
+            tt.validate(b);
+        testBuffers(tt, tests);
+    }
+
+    void assertCompositeComparesSame(AbstractType t1, AbstractType t2, Object o1, Object o2, Object o3, Object o4)
+    {
+        CompositeType tt = CompositeType.getInstance(t1, t2);
+        ByteBuffer b1 = CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(t1, o1), decomposeAndRandomPad(t2, o2));
+        ByteBuffer b2 = CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(t1, o3), decomposeAndRandomPad(t2, o4));
+        assertComparesSameBuffers(tt, b1, b2);
+    }
+
+    @Test
+    public void testDynamicComposite()
+    {
+        DynamicCompositeType tt = DynamicCompositeType.getInstance(DynamicCompositeTypeTest.aliases);
+        UUID[] uuids = DynamicCompositeTypeTest.uuids;
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", null, -1, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", uuids[0], 24, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", uuids[0], 42, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test2", uuids[0], -1, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test2", uuids[1], 42, false, true)
+            );
+        for (ByteBuffer b : tests)
+            tt.validate(b);
+        testBuffers(tt, tests);
+    }
+
+    @Test
+    public void testListTypeString()
+    {
+        testCollection(ListType.getInstance(UTF8Type.instance, true), testStrings, () -> new ArrayList<>(), new Random());
+    }
+
+    @Test
+    public void testListTypeLong()
+    {
+        testCollection(ListType.getInstance(LongType.instance, true), testLongs, () -> new ArrayList<>(), new Random());
+    }
+
+    @Test
+    public void testSetTypeString()
+    {
+        testCollection(SetType.getInstance(UTF8Type.instance, true), testStrings, () -> new HashSet<>(), new Random());
+    }
+
+    @Test
+    public void testSetTypeLong()
+    {
+        testCollection(SetType.getInstance(LongType.instance, true), testLongs, () -> new HashSet<>(), new Random());
+    }
+
+    <T, CT extends Collection<T>> void testCollection(CollectionType<CT> tt, T[] values, Supplier<CT> gen, Random rand)
+    {
+        int cnt = 0;
+        List<CT> tests = new ArrayList<>();
+        tests.add(gen.get());
+        for (int c = 1; c <= 3; ++c)
+            for (int j = 0; j < 5; ++j)
+            {
+                CT l = gen.get();
+                for (int i = 0; i < c; ++i)
+                    l.add(values[cnt++ % values.length]);
+
+                tests.add(l);
+            }
+        testType(tt, tests);
+    }
+
+    @Test
+    public void testMapTypeStringLong()
+    {
+        testMap(MapType.getInstance(UTF8Type.instance, LongType.instance, true), testStrings, testLongs, () -> new HashMap<>(), new Random());
+    }
+
+    @Test
+    public void testMapTypeStringLongTree()
+    {
+        testMap(MapType.getInstance(UTF8Type.instance, LongType.instance, true), testStrings, testLongs, () -> new TreeMap<>(), new Random());
+    }
+
+    @Test
+    public void testDecoratedKeyPrefixesVOSS42()
+    {
+        // This should pass with the OSS 4.1 encoding
+        testDecoratedKeyPrefixes(Version.OSS42);
+    }
+
+    @Test
+    public void testDecoratedKeyPrefixesVLegacy()
+    {
+        // ... and fail with the legacy encoding
+        try
+        {
+            testDecoratedKeyPrefixes(Version.LEGACY);
+        }
+        catch (AssertionError e)
+        {
+            // Correct path, test failing.
+            return;
+        }
+        Assert.fail("Test expected to fail.");
+    }
+
+    @Test
+    public void testFixedLengthWithOffset()
+    {
+        byte[] bytes = new byte[]{ 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+
+        ByteSource source = ByteSource.fixedLength(bytes, 0, 1);
+        assertEquals(1, source.next());
+        assertEquals(ByteSource.END_OF_STREAM, source.next());
+
+        source = ByteSource.fixedLength(bytes, 4, 5);
+        assertEquals(5, source.next());
+        assertEquals(6, source.next());
+        assertEquals(7, source.next());
+        assertEquals(8, source.next());
+        assertEquals(9, source.next());
+        assertEquals(ByteSource.END_OF_STREAM, source.next());
+
+        ByteSource.fixedLength(bytes, 9, 0);
+        assertEquals(ByteSource.END_OF_STREAM, source.next());
+    }
+
+    @Test
+    public void testFixedLengthNegativeLength()
+    {
+        byte[] bytes = new byte[]{ 1, 2, 3 };
+
+        expectedException.expect(IllegalArgumentException.class);
+        ByteSource.fixedLength(bytes, 0, -1);
+    }
+
+    @Test
+    public void testFixedLengthNegativeOffset()
+    {
+        byte[] bytes = new byte[]{ 1, 2, 3 };
+
+        expectedException.expect(IllegalArgumentException.class);
+        ByteSource.fixedLength(bytes, -1, 1);
+    }
+
+    @Test
+    public void testFixedLengthOutOfBounds()
+    {
+        byte[] bytes = new byte[]{ 1, 2, 3 };
+
+        expectedException.expect(IllegalArgumentException.class);
+        ByteSource.fixedLength(bytes, 0, 4);
+    }
+
+    @Test
+    public void testFixedOffsetOutOfBounds()
+    {
+        byte[] bytes = new byte[]{ 1, 2, 3 };
+
+        expectedException.expect(IllegalArgumentException.class);
+        ByteSource.fixedLength(bytes, 4, 1);
+    }
+
+    @Test
+    public void testSeparatorGT()
+    {
+        testSeparator(ByteComparable::separatorGt, testLongs, LongType.instance);
+    }
+
+    @Test
+    public void testSeparatorPrefix()
+    {
+        testSeparator(ByteComparable::separatorPrefix, testLongs, LongType.instance);
+    }
+
+    @Test
+    public void testSeparatorPrefixViaDiffPoint()
+    {
+        testSeparator((x, y) -> version -> ByteSource.cut(y.asComparableBytes(version),
+                                                          ByteComparable.diffPoint(x, y, version)),
+                      testLongs,
+                      LongType.instance);
+    }
+    @Test
+    public void testSeparatorNext()
+    {
+        // Appending a 00 byte at the end gives the immediate next possible value after x.
+        testSeparator((x, y) -> version -> ByteSource.cutOrRightPad(x.asComparableBytes(version),
+                                                                    ByteComparable.length(x, version) + 1,
+                                                                    0),
+                      testLongs,
+                      LongType.instance);
+    }
+
+    /**
+     * For every ordered pair (v1 < v2) of non-null test values, checks that the separator
+     * produced by {@code separatorMethod} is strictly greater than the left operand and
+     * less than or equal to the right operand, under every ByteComparable version.
+     */
+    private <T> void testSeparator(BiFunction<ByteComparable, ByteComparable, ByteComparable> separatorMethod, T[] testValues, AbstractType<T> type)
+    {
+        for (T v1 : testValues)
+            for (T v2 : testValues)
+            {
+                // Only test ordered pairs; skip nulls and pairs where v1 >= v2.
+                if (v1 == null || v2 == null)
+                    continue;
+                if (type.compare(type.decompose(v1), type.decompose(v2)) >= 0)
+                    continue;
+                ByteComparable bc1 = getByteComparable(type, v1);
+                ByteComparable bc2 = getByteComparable(type, v2);
+                ByteComparable separator = separatorMethod.apply(bc1, bc2);
+
+                for (Version version : Version.values())
+                {
+                    Assert.assertTrue("Sanity check failed", ByteComparable.compare(bc1, bc2, version) < 0);
+                    Assert.assertTrue(String.format("Separator %s must be greater than left %s (for %s) (version %s)",
+                                                    separator.byteComparableAsString(version),
+                                                    bc1.byteComparableAsString(version),
+                                                    v1,
+                                                    version),
+                                      ByteComparable.compare(bc1, separator, version) < 0);
+                    Assert.assertTrue(String.format("Separator %s must be less than or equal to right %s (for %s) (version %s)",
+                                                    separator.byteComparableAsString(version),
+                                                    bc2.byteComparableAsString(version),
+                                                    v2,
+                                                    version),
+                                      ByteComparable.compare(separator, bc2, version) <= 0);
+                }
+            }
+    }
+
+    // Wraps a typed value as a ByteComparable by decomposing it through the type's serializer.
+    private <T> ByteComparable getByteComparable(AbstractType<T> type, T v1)
+    {
+        return version -> type.asComparableBytes(type.decompose(v1), version);
+    }
+
+    // Drives the key/append overload with a fixed set of key strings and suffixes;
+    // the suffixes are fed to Util.generateMurmurCollision in the overload below.
+    public void testDecoratedKeyPrefixes(Version version)
+    {
+        testDecoratedKeyPrefixes("012345678BCDE\0", "", version);
+        testDecoratedKeyPrefixes("012345678ABCDE\0", "ABC", version);
+        testDecoratedKeyPrefixes("0123456789ABCDE\0", "\0AB", version);
+        testDecoratedKeyPrefixes("0123456789ABCDEF\0", "\0", version);
+
+        testDecoratedKeyPrefixes("0123456789ABCDEF0", "ABC", version);
+        testDecoratedKeyPrefixes("0123456789ABCDEF", "", version);
+        testDecoratedKeyPrefixes("0123456789ABCDE", "", version);
+        testDecoratedKeyPrefixes("0123456789ABCD", "\0AB", version);
+        testDecoratedKeyPrefixes("0123456789ABC", "\0", version);
+
+    }
+
+    /**
+     * Decorates the original key and a Murmur-collision variant of it (built from the
+     * {@code append} suffix), then checks that their byte-comparable representations
+     * still order the same as DecoratedKey.compareTo and that neither is a prefix of
+     * the other under the given version.
+     */
+    public void testDecoratedKeyPrefixes(String key, String append, Version version)
+    {
+        logger.info("Testing {} + {}", safeStr(key), safeStr(append));
+        IPartitioner partitioner = Murmur3Partitioner.instance;
+        ByteBuffer original = ByteBufferUtil.bytes(key);
+        ByteBuffer collision = Util.generateMurmurCollision(original, append.getBytes(StandardCharsets.UTF_8));
+
+        // Log both hashes; the first 128-bit hash should collide with the second.
+        long[] hash = new long[2];
+        MurmurHash.hash3_x64_128(original, 0, original.limit(), 0, hash);
+        logger.info(String.format("Original hash  %016x,%016x", hash[0], hash[1]));
+        MurmurHash.hash3_x64_128(collision, 0, collision.limit(), 0, hash);
+        logger.info(String.format("Collision hash %016x,%016x", hash[0], hash[1]));
+
+        DecoratedKey kk1 = partitioner.decorateKey(original);
+        DecoratedKey kk2 = partitioner.decorateKey(collision);
+        logger.info("{}\n{}\n{}\n{}", kk1, kk2, kk1.byteComparableAsString(version), kk2.byteComparableAsString(version));
+
+        final ByteSource s1 = kk1.asComparableBytes(version);
+        final ByteSource s2 = kk2.asComparableBytes(version);
+        logger.info("{}\n{}", s1, s2);
+
+        // Check that the representations compare correctly
+        Assert.assertEquals(Long.signum(kk1.compareTo(kk2)), ByteComparable.compare(kk1, kk2, version));
+        // s1 must not be a prefix of s2
+        assertNotPrefix(s1, s2);
+    }
+
+    /**
+     * Asserts that neither byte source is a strict prefix of the other.
+     * Consumes both sources; equal streams are accepted.
+     */
+    private void assertNotPrefix(ByteSource s1, ByteSource s2)
+    {
+        // Advance both streams past their common prefix.
+        int c1, c2;
+        do
+        {
+            c1 = s1.next();
+            c2 = s2.next();
+        }
+        while (c1 == c2 && c1 != ByteSource.END_OF_STREAM);
+
+        // Equal is ok
+        if (c1 == c2)
+            return;
+
+        // If exactly one stream ended at the divergence point, it is a strict prefix.
+        Assert.assertNotEquals("ByteComparable is a prefix of other", ByteSource.END_OF_STREAM, c1);
+        Assert.assertNotEquals("ByteComparable is a prefix of other", ByteSource.END_OF_STREAM, c2);
+    }
+
+    /**
+     * Lexicographically compares two byte sources, consuming both.
+     * Relies on END_OF_STREAM comparing lower than any byte value so that a
+     * prefix sorts before its extensions.
+     */
+    private int compare(ByteSource s1, ByteSource s2)
+    {
+        int c1, c2;
+        do
+        {
+            c1 = s1.next();
+            c2 = s2.next();
+        }
+        while (c1 == c2 && c1 != ByteSource.END_OF_STREAM);
+
+        return Integer.compare(c1, c2);
+    }
+
+    // The prefix-freedom property is only guaranteed for the OSS42 encoding,
+    // so the check is skipped for other versions.
+    private void maybeAssertNotPrefix(ByteComparable s1, ByteComparable s2, Version version)
+    {
+        if (version == Version.OSS42)
+            assertNotPrefix(s1.asComparableBytes(version), s2.asComparableBytes(version));
+    }
+
+    /**
+     * Checks that unequal values still compare correctly and remain prefix-free when
+     * each is wrapped with an independently chosen random terminator byte.
+     * NOTE(review): the name says "41" but the guard is on Version.OSS42 — possibly a
+     * stale name from an earlier version constant; confirm against the version enum.
+     */
+    private void maybeCheck41Properties(int expectedComparison, ByteComparable s1, ByteComparable s2, Version version)
+    {
+        if (version != Version.OSS42)
+            return;
+
+        // Terminators cannot influence ordering of unequal values; equal values are skipped.
+        if (s1 == null || s2 == null || 0 == expectedComparison)
+            return;
+        int b1 = randomTerminator();
+        int b2 = randomTerminator();
+        assertEquals(String.format("Comparison failed for %s(%s + %02x) and %s(%s + %02x)", s1, s1.byteComparableAsString(version), b1, s2, s2.byteComparableAsString(version), b2),
+                expectedComparison, Integer.signum(compare(ByteSource.withTerminator(b1, s1.asComparableBytes(version)), ByteSource.withTerminator(b2, s2.asComparableBytes(version)))));
+        assertNotPrefix(ByteSource.withTerminator(b1, s1.asComparableBytes(version)), ByteSource.withTerminator(b2, s2.asComparableBytes(version)));
+    }
+
+    // Picks a random terminator byte in [MIN_SEPARATOR, MAX_SEPARATOR], rejecting the
+    // [MIN_NEXT_COMPONENT, MAX_NEXT_COMPONENT] sub-range reserved for component separators.
+    private int randomTerminator()
+    {
+        int term;
+        do
+        {
+            term = ThreadLocalRandom.current().nextInt(ByteSource.MIN_SEPARATOR, ByteSource.MAX_SEPARATOR + 1);
+        }
+        while (term >= ByteSource.MIN_NEXT_COMPONENT && term <= ByteSource.MAX_NEXT_COMPONENT);
+        return term;
+    }
+
+    /**
+     * Builds a collection of random maps (plus one empty map) with 1..3 entries each,
+     * drawn from the given key/value pools, and runs them through testType.
+     */
+    <K, V, M extends Map<K, V>> void testMap(MapType<K, V> tt, K[] keys, V[] values, Supplier<M> gen, Random rand)
+    {
+        List<M> tests = new ArrayList<>();
+        tests.add(gen.get());   // always include the empty map
+        for (int c = 1; c <= 3; ++c)
+            for (int j = 0; j < 5; ++j)
+            {
+                M l = gen.get();
+                for (int i = 0; i < c; ++i)
+                    l.put(keys[rand.nextInt(keys.length)], values[rand.nextInt(values.length)]);
+
+                tests.add(l);
+            }
+        testType(tt, tests);
+    }
+
+    /**
+     * Wraps a serialized value as a ByteComparable using the type's asComparableBytes,
+     * with a toString that renders the value via the type for readable failure messages.
+     */
+    private ByteComparable typeToComparable(AbstractType<?> type, ByteBuffer value)
+    {
+        return new ByteComparable()
+        {
+            @Override
+            public ByteSource asComparableBytes(Version v)
+            {
+                return type.asComparableBytes(value, v);
+            }
+
+            @Override
+            public String toString()
+            {
+                return type.getString(value);
+            }
+        };
+    }
+
+    // Array convenience overload; elements are cast unchecked to the type's value class.
+    public <T> void testType(AbstractType<T> type, Object[] values)
+    {
+        testType(type, Iterables.transform(Arrays.asList(values), x -> (T) x));
+    }
+
+    /**
+     * Logs each value's serialized and byte-comparable forms, then asserts that every
+     * pair of values compares the same via the type and via ByteComparable. Also
+     * recurses once with the reversed type.
+     */
+    public <T> void testType(AbstractType<? super T> type, Iterable<T> values)
+    {
+        for (T i : values) {
+            ByteBuffer b = decomposeAndRandomPad(type, i);
+            logger.info("Value {} ({}) bytes {} ByteSource {}",
+                              safeStr(i),
+                              safeStr(type.getSerializer().toCQLLiteral(b)),
+                              safeStr(ByteBufferUtil.bytesToHex(b)),
+                              typeToComparable(type, b).byteComparableAsString(Version.OSS42));
+        }
+        for (T i : values)
+            for (T j : values)
+                assertComparesSame(type, i, j);
+        // Also cover the reversed ordering of the same type (once; reversed-of-reversed is skipped).
+        if (!type.isReversed())
+            testType(ReversedType.getInstance(type), values);
+    }
+
+    /**
+     * Like testType but for pre-serialized buffers: logs each buffer (when the type can
+     * render it) and asserts pairwise comparison consistency.
+     */
+    public void testBuffers(AbstractType<?> type, List<ByteBuffer> values)
+    {
+        try
+        {
+            for (ByteBuffer b : values) {
+                logger.info("Value {} bytes {} ByteSource {}",
+                            safeStr(type.getSerializer().toCQLLiteral(b)),
+                            safeStr(ByteBufferUtil.bytesToHex(b)),
+                            typeToComparable(type, b).byteComparableAsString(Version.OSS42));
+            }
+        }
+        catch (UnsupportedOperationException e)
+        {
+            // Continue without listing values: some serializers cannot produce CQL literals.
+        }
+
+        for (ByteBuffer i : values)
+            for (ByteBuffer j : values)
+                assertComparesSameBuffers(type, i, j);
+    }
+
+    /**
+     * Asserts that the type's native comparison and the byte-comparable comparison
+     * agree (by signum) for the two buffers, for every encoding version.
+     */
+    void assertComparesSameBuffers(AbstractType<?> type, ByteBuffer b1, ByteBuffer b2)
+    {
+        int expected = Integer.signum(type.compare(b1, b2));
+        final ByteComparable bs1 = typeToComparable(type, b1);
+        final ByteComparable bs2 = typeToComparable(type, b2);
+
+        for (Version version : Version.values())
+        {
+            int actual = Integer.signum(ByteComparable.compare(bs1, bs2, version));
+            assertEquals(String.format("Failed comparing %s(%s) and %s(%s)", ByteBufferUtil.bytesToHex(b1), bs1.byteComparableAsString(version), ByteBufferUtil.bytesToHex(b2), bs2.byteComparableAsString(version)),
+                         expected,
+                         actual);
+            maybeCheck41Properties(expected, bs1, bs2, version);
+        }
+    }
+
+    // Pairwise comparison consistency plus per-key before/after bound checks for a partitioner.
+    public void testDecoratedKeys(IPartitioner type, List<ByteBuffer> values)
+    {
+        for (ByteBuffer i : values)
+            for (ByteBuffer j : values)
+                assertComparesSameDecoratedKeys(type, i, j);
+        for (ByteBuffer i : values)
+            assertDecoratedKeyBounds(type, i);
+    }
+
+    /**
+     * Asserts that DecoratedKey.compareTo and the byte-comparable comparison agree
+     * (by signum) for two keys under the given partitioner, for every version, and
+     * that the OSS42 representations are prefix-free.
+     */
+    void assertComparesSameDecoratedKeys(IPartitioner type, ByteBuffer b1, ByteBuffer b2)
+    {
+        DecoratedKey k1 = type.decorateKey(b1);
+        DecoratedKey k2 = type.decorateKey(b2);
+        int expected = Integer.signum(k1.compareTo(k2));
+
+        for (Version version : Version.values())
+        {
+            int actual = Integer.signum(ByteComparable.compare(k1, k2, version));
+            assertEquals(String.format("Failed comparing %s[%s](%s) and %s[%s](%s)\npartitioner %s version %s",
+                                       ByteBufferUtil.bytesToHex(b1),
+                                       k1,
+                                       k1.byteComparableAsString(version),
+                                       ByteBufferUtil.bytesToHex(b2),
+                                       k2,
+                                       k2.byteComparableAsString(version),
+                                       type,
+                                       version),
+                         expected,
+                         actual);
+            maybeAssertNotPrefix(k1, k2, version);
+        }
+    }
+
+    /**
+     * Checks the ordering of a key against its asComparableBound representations
+     * (OSS42 only): before-bound < key < after-bound, and before < after, with the
+     * usual prefix-freedom checks.
+     */
+    void assertDecoratedKeyBounds(IPartitioner type, ByteBuffer b)
+    {
+        Version version = Version.OSS42;
+        DecoratedKey k = type.decorateKey(b);
+        final ByteComparable after = k.asComparableBound(false);
+        final ByteComparable before = k.asComparableBound(true);
+
+        // key must sort strictly after its "before" bound
+        int actual = Integer.signum(ByteComparable.compare(k, before, version));
+        assertEquals(String.format("Failed comparing bound before (%s) for %s[%s](%s)\npartitioner %s version %s",
+                                   before.byteComparableAsString(version),
+                                   ByteBufferUtil.bytesToHex(b),
+                                   k,
+                                   k.byteComparableAsString(version),
+                                   type,
+                                   version),
+                     1,
+                     actual);
+        maybeAssertNotPrefix(k, before, version);
+
+        // key must sort strictly before its "after" bound
+        actual = Integer.signum(ByteComparable.compare(k, after, version));
+        assertEquals(String.format("Failed comparing bound after (%s) for %s[%s](%s)\npartitioner %s version %s",
+                                   after.byteComparableAsString(version),
+                                   ByteBufferUtil.bytesToHex(b),
+                                   k,
+                                   k.byteComparableAsString(version),
+                                   type,
+                                   version),
+                     -1,
+                     actual);
+        maybeAssertNotPrefix(k, after, version);
+
+        // and the bounds must order before < after
+        actual = Integer.signum(ByteComparable.compare(before, after, version));
+        assertEquals(String.format("Failed comparing bound before (%s) to after (%s) for %s[%s](%s)\npartitioner %s version %s",
+                                   before.byteComparableAsString(version),
+                                   after.byteComparableAsString(version),
+                                   ByteBufferUtil.bytesToHex(b),
+                                   k,
+                                   k.byteComparableAsString(version),
+                                   type,
+                                   version),
+                     -1,
+                     actual);
+        maybeAssertNotPrefix(after, before, version);
+    }
+
+    /**
+     * Makes a value safe for logging: truncates to 100 characters and replaces
+     * NUL characters with a visible "<0>" marker. Returns null for null input.
+     */
+    static Object safeStr(Object i)
+    {
+        if (i == null)
+            return null;
+        String s = i.toString();
+        if (s.length() > 100)
+            s = s.substring(0, 100) + "...";
+        return s.replaceAll("\0", "<0>");
+    }
+
+    /**
+     * Tests a direct (non-AbstractType) ByteSource conversion: logs each non-null
+     * value, then asserts that the supplied comparator and the byte-source comparison
+     * agree for every pair of non-null values.
+     */
+    public <T> void testDirect(Function<T, ByteSource> convertor, BiFunction<T, T, Integer> comparator, T[] values)
+    {
+        for (T i : values) {
+            if (i == null)
+                continue;
+
+            logger.info("Value {} ByteSource {}\n",
+                              safeStr(i),
+                              convertor.apply(i));
+        }
+        for (T i : values)
+            if (i != null)
+                for (T j : values)
+                    if (j != null)
+                        assertComparesSame(convertor, comparator, i, j);
+    }
+
+    // Compares two values through a version-independent convertor; the lambda ignores
+    // the version argument, so null is passed deliberately.
+    <T> void assertComparesSame(Function<T, ByteSource> convertor, BiFunction<T, T, Integer> comparator, T v1, T v2)
+    {
+        ByteComparable b1 = v -> convertor.apply(v1);
+        ByteComparable b2 = v -> convertor.apply(v2);
+        int expected = Integer.signum(comparator.apply(v1, v2));
+        int actual = Integer.signum(ByteComparable.compare(b1, b2, null));  // version ignored above
+        assertEquals(String.format("Failed comparing %s and %s", v1, v2), expected, actual);
+    }
+
+    /**
+     * Asserts type comparison and byte-comparable comparison agree for two values,
+     * for every version. For reversed types a direct mismatch is tolerated if the
+     * values still compare correctly when embedded in a single-component clustering
+     * (this covers nulls and prefix cases that reversal cannot order in isolation).
+     */
+    <T> void assertComparesSame(AbstractType<T> type, T v1, T v2)
+    {
+        ByteBuffer b1 = decomposeAndRandomPad(type, v1);
+        ByteBuffer b2 = decomposeAndRandomPad(type, v2);
+        int expected = Integer.signum(type.compare(b1, b2));
+        final ByteComparable bc1 = typeToComparable(type, b1);
+        final ByteComparable bc2 = typeToComparable(type, b2);
+
+        for (Version version : Version.values())
+        {
+            int actual = Integer.signum(ByteComparable.compare(bc1, bc2, version));
+            if (expected != actual)
+            {
+                if (type.isReversed())
+                {
+                    // This can happen for reverse of nulls and prefixes. Check that it's ok within multi-component
+                    ClusteringComparator cc = new ClusteringComparator(type);
+                    ByteComparable c1 = cc.asByteComparable(Clustering.make(b1));
+                    ByteComparable c2 = cc.asByteComparable(Clustering.make(b2));
+                    int actualcc = Integer.signum(ByteComparable.compare(c1, c2, version));
+                    if (actualcc == expected)
+                        return;
+                    assertEquals(String.format("Failed comparing reversed %s(%s, %s) and %s(%s, %s) direct (%d) and as clustering", safeStr(v1), ByteBufferUtil.bytesToHex(b1), c1, safeStr(v2), ByteBufferUtil.bytesToHex(b2), c2, actual), expected, actualcc);
+                }
+                else
+                    assertEquals(String.format("Failed comparing %s(%s BC %s) and %s(%s BC %s) version %s",
+                                               safeStr(v1),
+                                               ByteBufferUtil.bytesToHex(b1),
+                                               bc1.byteComparableAsString(version),
+                                               safeStr(v2),
+                                               ByteBufferUtil.bytesToHex(b2),
+                                               bc2.byteComparableAsString(version),
+                                               version),
+                                 expected,
+                                 actual);
+            }
+            maybeCheck41Properties(expected, bc1, bc2, version);
+        }
+    }
+
+    /**
+     * Decomposes a value and surrounds it with random padding bytes, then positions
+     * the buffer's position/limit so only the original bytes are visible. Verifies
+     * that consumers honor position/limit rather than reading the whole array.
+     * Uses a fixed seed (0) so padding is deterministic across runs.
+     */
+    <T> ByteBuffer decomposeAndRandomPad(AbstractType<T> type, T v)
+    {
+        ByteBuffer b = type.decompose(v);
+        Random rand = new Random(0);
+        int padBefore = rand.nextInt(16);
+        int padAfter = rand.nextInt(16);
+        int paddedCapacity = b.remaining() + padBefore + padAfter;
+        ByteBuffer padded = allocateBuffer(paddedCapacity);
+        rand.ints(padBefore).forEach(x -> padded.put((byte) x));
+        padded.put(b.duplicate());
+        rand.ints(padAfter).forEach(x -> padded.put((byte) x));
+        // Window the buffer onto the original value, excluding the padding.
+        padded.clear().limit(padded.capacity() - padAfter).position(padBefore);
+        return padded;
+    }
+
+    // Overridable allocation hook so subclasses can test with direct buffers.
+    protected ByteBuffer allocateBuffer(int paddedCapacity)
+    {
+        return ByteBuffer.allocate(paddedCapacity);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceConversionTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceConversionTest.java
new file mode 100644
index 0000000..5a59ddf
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceConversionTest.java
@@ -0,0 +1,784 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ClusteringPrefix;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.dht.ByteOrderedPartitioner;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.dht.Murmur3Partitioner;
+import org.apache.cassandra.dht.RandomPartitioner;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.TimeUUID;
+import org.apache.cassandra.utils.bytecomparable.ByteComparable.Version;
+
+import static org.apache.cassandra.utils.bytecomparable.ByteSourceComparisonTest.decomposeForTuple;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests that the result of forward + backward ByteSource translation is the same as the original.
+ */
+public class ByteSourceConversionTest extends ByteSourceTestBase
+{
+    private final static Logger logger = LoggerFactory.getLogger(ByteSourceConversionTest.class);
+    public static final Version VERSION = Version.OSS42;
+
+    @Rule
+    public final ExpectedException expectedException = ExpectedException.none();
+
+    @Test
+    public void testStringsAscii()
+    {
+        // Only round-trip strings that survive an ASCII encode/decode unchanged.
+        testType(AsciiType.instance, Arrays.stream(testStrings)
+                                           .filter(s -> s.equals(new String(s.getBytes(StandardCharsets.US_ASCII),
+                                                                            StandardCharsets.US_ASCII)))
+                                           .toArray());
+    }
+
+    @Test
+    public void testStringsUTF8()
+    {
+        testType(UTF8Type.instance, testStrings);
+        // Also round-trip through the raw ByteSource string conversion.
+        testDirect(x -> ByteSource.of(x, VERSION), ByteSourceInverse::getString, testStrings);
+    }
+
+    @Test
+    public void testBooleans()
+    {
+        testType(BooleanType.instance, testBools);
+    }
+
+    @Test
+    public void testInts()
+    {
+        testType(Int32Type.instance, testInts);
+        // Also round-trip via the direct signed-int conversion.
+        testDirect(ByteSource::of, ByteSourceInverse::getSignedInt, testInts);
+    }
+
+    @Test
+    public void randomTestInts()
+    {
+        // Unseeded randomized sweep: 10000 arbitrary ints must round-trip.
+        Random rand = new Random();
+        for (int i=0; i<10000; ++i)
+        {
+            int i1 = rand.nextInt();
+            assertConvertsSame(Int32Type.instance, i1);
+        }
+
+    }
+
+    @Test
+    public void testLongs()
+    {
+        testType(LongType.instance, testLongs);
+        // Also round-trip via the direct signed-long conversion.
+        testDirect(ByteSource::of, ByteSourceInverse::getSignedLong, testLongs);
+    }
+
+    @Test
+    public void testShorts()
+    {
+        testType(ShortType.instance, testShorts);
+    }
+
+    @Test
+    public void testBytes()
+    {
+        testType(ByteType.instance, testBytes);
+    }
+
+    @Test
+    public void testDoubles()
+    {
+        testType(DoubleType.instance, testDoubles);
+    }
+
+    @Test
+    public void testFloats()
+    {
+        testType(FloatType.instance, testFloats);
+    }
+
+    @Test
+    public void testBigInts()
+    {
+        testType(IntegerType.instance, testBigInts);
+    }
+
+    @Test
+    public void testBigDecimals()
+    {
+        // Decimals go through the buffer-based path rather than the value-based one.
+        testTypeBuffers(DecimalType.instance, testBigDecimals);
+    }
+
+    @Test
+    public void testUUIDs()
+    {
+        testType(UUIDType.instance, testUUIDs);
+    }
+
+    @Test
+    public void testTimeUUIDs()
+    {
+        // Only version-1 UUIDs are valid TimeUUIDs; nulls are kept as nulls.
+        testType(TimeUUIDType.instance, Arrays.stream(testUUIDs)
+                                              .filter(x -> x == null || x.version() == 1)
+                                              .map(x -> x != null ? TimeUUID.fromUuid(x) : null)
+                                              .toArray());
+    }
+
+    @Test
+    public void testLexicalUUIDs()
+    {
+        testType(LexicalUUIDType.instance, testUUIDs);
+    }
+
+    @Test
+    public void testSimpleDate()
+    {
+        // SimpleDateType values are unsigned days-since-epoch ints; nulls are excluded.
+        testType(SimpleDateType.instance, Arrays.stream(testInts).filter(x -> x != null).toArray());
+    }
+
+    @Test
+    public void testTimeType()
+    {
+        // Valid times are nanoseconds within one day: [0, 24h) in nanos.
+        testType(TimeType.instance, Arrays.stream(testLongs).filter(x -> x != null && x >= 0 && x <= 24L * 60 * 60 * 1000 * 1000 * 1000).toArray());
+    }
+
+    @SuppressWarnings("deprecation")
+    @Test
+    public void testDateType()
+    {
+        // Legacy DateType is deprecated but must still round-trip.
+        testType(DateType.instance, testDates);
+    }
+
+    @Test
+    public void testTimestampType()
+    {
+        testType(TimestampType.instance, testDates);
+    }
+
+    @Test
+    public void testBytesType()
+    {
+        // Serialize every test value of every type and round-trip the raw bytes.
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testType(BytesType.instance, values);
+    }
+
+    @Test
+    public void testInetAddressType() throws UnknownHostException
+    {
+        testType(InetAddressType.instance, testInets);
+    }
+
+    @Test
+    public void testEmptyType()
+    {
+        // EmptyType has a single (null) value.
+        testType(EmptyType.instance, new Void[] { null });
+    }
+
+    @Test
+    // NOTE(review): "Patitioner" is a typo for "Partitioner" in this test name;
+    // left as-is since renaming is out of scope for a documentation pass.
+    public void testPatitionerDefinedOrder()
+    {
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testBuffers(new PartitionerDefinedOrder(Murmur3Partitioner.instance), values);
+        testBuffers(new PartitionerDefinedOrder(RandomPartitioner.instance), values);
+        testBuffers(new PartitionerDefinedOrder(ByteOrderedPartitioner.instance), values);
+    }
+
+    @Test
+    // NOTE(review): "Patitioner" is a typo for "Partitioner" in this test name.
+    public void testPatitionerOrder()
+    {
+        List<ByteBuffer> values = new ArrayList<>();
+        for (int i = 0; i < testValues.length; ++i)
+            for (Object o : testValues[i])
+                values.add(testTypes[i].decompose(o));
+
+        testDecoratedKeys(Murmur3Partitioner.instance, values);
+        testDecoratedKeys(RandomPartitioner.instance, values);
+        testDecoratedKeys(ByteOrderedPartitioner.instance, values);
+    }
+
+    @Test
+    // NOTE(review): "Patitioner" is a typo for "Partitioner" in this test name.
+    public void testLocalPatitionerOrder()
+    {
+        // A LocalPartitioner per test type, keyed by that type's own values.
+        for (int i = 0; i < testValues.length; ++i)
+        {
+            final AbstractType testType = testTypes[i];
+            testDecoratedKeys(new LocalPartitioner(testType), Lists.transform(Arrays.asList(testValues[i]),
+                                                                                            v -> testType.decompose(v)));
+        }
+    }
+
+    // Callback for testCombinationSampling: receives a pair of types and one value of each.
+    interface PairTester
+    {
+        void test(AbstractType t1, AbstractType t2, Object o1, Object o2);
+    }
+
+    /**
+     * For every ordered pair of test types, samples 3 random values of each and
+     * invokes the tester on all 9 value combinations.
+     */
+    void testCombinationSampling(Random rand, PairTester tester)
+    {
+        for (int i=0;i<testTypes.length;++i)
+            for (int j=0;j<testTypes.length;++j)
+            {
+                Object[] tv1 = new Object[3];
+                Object[] tv2 = new Object[3];
+                for (int t=0; t<tv1.length; ++t)
+                {
+                    tv1[t] = testValues[i][rand.nextInt(testValues[i].length)];
+                    tv2[t] = testValues[j][rand.nextInt(testValues[j].length)];
+                }
+
+                for (Object o1 : tv1)
+                    for (Object o2 : tv2)
+
+                {
+                    tester.test(testTypes[i], testTypes[j], o1, o2);
+                }
+            }
+    }
+
+    @Test
+    public void testCombinations()
+    {
+        // Seeded for reproducibility.
+        Random rand = new Random(0);
+        testCombinationSampling(rand, this::assertClusteringPairConvertsSame);
+    }
+
+    @Test
+    public void testNullsInClustering()
+    {
+        // Every combination of {value, serialized-null, absent} for a two-component
+        // (text, int) clustering must survive the byte-comparable round-trip.
+        ByteBuffer[][] inputs = new ByteBuffer[][]
+                                {
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, "a"),
+                                                  null},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {decomposeAndRandomPad(UTF8Type.instance, ""),
+                                                  null},
+                                new ByteBuffer[] {null,
+                                                  decomposeAndRandomPad(Int32Type.instance, 0)},
+                                new ByteBuffer[] {null,
+                                                  decomposeAndRandomPad(Int32Type.instance, null)},
+                                new ByteBuffer[] {null,
+                                                  null},
+                                };
+        for (ByteBuffer[] input : inputs)
+        {
+            assertClusteringPairConvertsSame(ByteBufferAccessor.instance,
+                                             UTF8Type.instance,
+                                             Int32Type.instance,
+                                             input[0],
+                                             input[1],
+                                             (t, v) -> (ByteBuffer) v);
+        }
+    }
+
+    @Test
+    public void testEmptyClustering()
+    {
+        // Zero-component prefixes of every non-boundary kind must round-trip to equal prefixes.
+        ValueAccessor<ByteBuffer> accessor = ByteBufferAccessor.instance;
+        ClusteringComparator comp = new ClusteringComparator();
+        for (ClusteringPrefix.Kind kind : ClusteringPrefix.Kind.values())
+        {
+            if (kind.isBoundary())
+                continue;
+
+            ClusteringPrefix<ByteBuffer> empty = ByteSourceComparisonTest.makeBound(kind);
+            ClusteringPrefix<ByteBuffer> converted = getClusteringPrefix(accessor, kind, comp, comp.asByteComparable(empty));
+            assertEquals(empty, converted);
+        }
+    }
+
+    // Convenience overload: runs the accessor-parameterized check for every known accessor.
+    void assertClusteringPairConvertsSame(AbstractType t1, AbstractType t2, Object o1, Object o2)
+    {
+        for (ValueAccessor<?> accessor : ValueAccessors.ACCESSORS)
+            assertClusteringPairConvertsSame(accessor, t1, t2, o1, o2, AbstractType::decompose);
+    }
+
+    /**
+     * Round-trips a two-component clustering prefix of every kind through its
+     * byte-comparable form (forward and reversed comparators) and asserts that the
+     * converted prefix compares equal — and, when both types support it, is equal —
+     * to the original. DecimalType is excluded from the equality check (checkEquals).
+     */
+    <V> void assertClusteringPairConvertsSame(ValueAccessor<V> accessor,
+                                              AbstractType<?> t1, AbstractType<?> t2,
+                                              Object o1, Object o2,
+                                              BiFunction<AbstractType, Object, ByteBuffer> decompose)
+    {
+        boolean checkEquals = t1 != DecimalType.instance && t2 != DecimalType.instance;
+        for (ClusteringPrefix.Kind k1 : ClusteringPrefix.Kind.values())
+            {
+                ClusteringComparator comp = new ClusteringComparator(t1, t2);
+                V[] b = accessor.createArray(2);
+                b[0] = accessor.valueOf(decompose.apply(t1, o1));
+                b[1] = accessor.valueOf(decompose.apply(t2, o2));
+                ClusteringPrefix<V> c = ByteSourceComparisonTest.makeBound(accessor.factory(), k1, b);
+                final ByteComparable bsc = comp.asByteComparable(c);
+                logger.info("Clustering {} bytesource {}", c.clusteringString(comp.subtypes()), bsc.byteComparableAsString(VERSION));
+                ClusteringPrefix<V> converted = getClusteringPrefix(accessor, k1, comp, bsc);
+                assertEquals(String.format("Failed compare(%s, converted %s ByteSource %s) == 0\ntype %s",
+                                           safeStr(c.clusteringString(comp.subtypes())),
+                                           safeStr(converted.clusteringString(comp.subtypes())),
+                                           bsc.byteComparableAsString(VERSION),
+                                           comp),
+                             0, comp.compare(c, converted));
+                if (checkEquals)
+                    assertEquals(String.format("Failed equals %s, got %s ByteSource %s\ntype %s",
+                                               safeStr(c.clusteringString(comp.subtypes())),
+                                               safeStr(converted.clusteringString(comp.subtypes())),
+                                               bsc.byteComparableAsString(VERSION),
+                                               comp),
+                                 c, converted);
+
+                // Repeat with both components reversed.
+                ClusteringComparator compR = new ClusteringComparator(ReversedType.getInstance(t1), ReversedType.getInstance(t2));
+                final ByteComparable bsrc = compR.asByteComparable(c);
+                converted = getClusteringPrefix(accessor, k1, compR, bsrc);
+                assertEquals(String.format("Failed reverse compare(%s, converted %s ByteSource %s) == 0\ntype %s",
+                                           safeStr(c.clusteringString(compR.subtypes())),
+                                           safeStr(converted.clusteringString(compR.subtypes())),
+                                           bsrc.byteComparableAsString(VERSION),
+                                           compR),
+                             0, compR.compare(c, converted));
+                if (checkEquals)
+                    assertEquals(String.format("Failed reverse equals %s, got %s ByteSource %s\ntype %s",
+                                               safeStr(c.clusteringString(compR.subtypes())),
+                                               safeStr(converted.clusteringString(compR.subtypes())),
+                                               bsrc.byteComparableAsString(VERSION),
+                                               compR),
+                                 c, converted);
+            }
+    }
+
+    private static <V> ClusteringPrefix<V> getClusteringPrefix(ValueAccessor<V> accessor,
+                                                               ClusteringPrefix.Kind k1,
+                                                               ClusteringComparator comp,
+                                                               ByteComparable bsc) // Inverse-translates a byte-comparable back into a prefix, dispatching on the prefix kind.
+    {
+        switch (k1)
+        {
+        case STATIC_CLUSTERING:
+        case CLUSTERING:
+            return comp.clusteringFromByteComparable(accessor, bsc); // full (or static) clustering
+        case EXCL_END_BOUND:
+        case INCL_END_BOUND:
+            return comp.boundFromByteComparable(accessor, bsc, true); // end bound: isEnd == true
+        case INCL_START_BOUND:
+        case EXCL_START_BOUND:
+            return comp.boundFromByteComparable(accessor, bsc, false); // start bound: isEnd == false
+        case EXCL_END_INCL_START_BOUNDARY:
+        case INCL_END_EXCL_START_BOUNDARY:
+            return comp.boundaryFromByteComparable(accessor, bsc);
+        default:
+            throw new AssertionError(); // every Kind value is handled above
+        }
+    }
+
+    private static ByteSource.Peekable source(ByteComparable bsc) // Null-safe wrap of a byte-comparable as a peekable source at the test's VERSION.
+    {
+        if (bsc == null)
+            return null;
+        return ByteSource.peekable(bsc.asComparableBytes(VERSION));
+    }
+
+    @Test
+    public void testTupleType() // Randomly sampled two-component tuples must round-trip through byte-comparable form.
+    {
+        Random rand = ThreadLocalRandom.current();
+        testCombinationSampling(rand, this::assertTupleConvertsSame);
+    }
+
+    @Test
+    public void testTupleTypeNonFull() // Tuples with trailing/embedded null components or fewer components than the type declares.
+    {
+        TupleType tt = new TupleType(ImmutableList.of(UTF8Type.instance, Int32Type.instance));
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 decomposeAndRandomPad(UTF8Type.instance, ""),
+                                 decomposeAndRandomPad(Int32Type.instance, 0)),
+            // Note: a decomposed null (e.g. decomposeAndRandomPad(Int32Type.instance, null)) should not reach a tuple
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 decomposeAndRandomPad(UTF8Type.instance, ""),
+                                 null), // explicit null second component
+            TupleType.buildValue(ByteBufferAccessor.instance,
+                                 null,
+                                 decomposeAndRandomPad(Int32Type.instance, 0)), // explicit null first component
+            TupleType.buildValue(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, "")), // shorter than declared
+            TupleType.buildValue(ByteBufferAccessor.instance, (ByteBuffer) null), // single null component
+            TupleType.buildValue(ByteBufferAccessor.instance) // empty tuple value
+            );
+        testBuffers(tt, tests);
+    }
+
+    void assertTupleConvertsSame(AbstractType t1, AbstractType t2, Object o1, Object o2) // Raw AbstractType params: matches the callback shape used by testCombinationSampling.
+    {
+        TupleType tt = new TupleType(ImmutableList.of(t1, t2));
+        ByteBuffer b1 = TupleType.buildValue(ByteBufferAccessor.instance,
+                                             decomposeForTuple(t1, o1),
+                                             decomposeForTuple(t2, o2));
+        assertConvertsSameBuffers(tt, b1);
+    }
+
+    @Test
+    public void testCompositeType() // Randomly sampled two-component composites must round-trip; fixed seed for reproducibility.
+    {
+        Random rand = new Random(0);
+        testCombinationSampling(rand, this::assertCompositeConvertsSame);
+    }
+
+    @Test
+    public void testCompositeTypeNonFull() // Composites with null/missing components and with the static/"isStatic" prefix variant.
+    {
+        CompositeType tt = CompositeType.getInstance(UTF8Type.instance, Int32Type.instance);
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, ""), decomposeAndRandomPad(Int32Type.instance, 0)),
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, ""), decomposeAndRandomPad(Int32Type.instance, null)),
+            CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(UTF8Type.instance, "")),
+            CompositeType.build(ByteBufferAccessor.instance),
+            CompositeType.build(ByteBufferAccessor.instance, true, decomposeAndRandomPad(UTF8Type.instance, "")), // boolean arg builds the static variant
+            CompositeType.build(ByteBufferAccessor.instance,true)
+            );
+        for (ByteBuffer b : tests)
+            tt.validate(b); // sanity: every fixture must be a valid composite before conversion checks
+        testBuffers(tt, tests);
+    }
+
+    void assertCompositeConvertsSame(AbstractType t1, AbstractType t2, Object o1, Object o2) // Raw AbstractType params: matches the callback shape used by testCombinationSampling.
+    {
+        CompositeType tt = CompositeType.getInstance(t1, t2);
+        ByteBuffer b1 = CompositeType.build(ByteBufferAccessor.instance, decomposeAndRandomPad(t1, o1), decomposeAndRandomPad(t2, o2));
+        assertConvertsSameBuffers(tt, b1);
+    }
+
+    @Test
+    public void testDynamicComposite() // Dynamic composites built from DynamicCompositeTypeTest fixtures must round-trip.
+    {
+        DynamicCompositeType tt = DynamicCompositeType.getInstance(DynamicCompositeTypeTest.aliases);
+        UUID[] uuids = DynamicCompositeTypeTest.uuids;
+        List<ByteBuffer> tests = ImmutableList.of
+            (
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", null, -1, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", uuids[0], 24, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test1", uuids[0], 42, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test2", uuids[0], -1, false, true),
+            DynamicCompositeTypeTest.createDynamicCompositeKey("test2", uuids[1], 42, false, true)
+            );
+        for (ByteBuffer b : tests)
+            tt.validate(b); // sanity: fixtures must be valid before conversion checks
+        testBuffers(tt, tests);
+    }
+
+    @Test
+    public void testListTypeString() // Multi-cell list of strings round-trips.
+    {
+        testCollection(ListType.getInstance(UTF8Type.instance, true), testStrings, () -> new ArrayList<>(), new Random());
+    }
+
+    @Test
+    public void testListTypeLong() // Multi-cell list of longs round-trips.
+    {
+        testCollection(ListType.getInstance(LongType.instance, true), testLongs, () -> new ArrayList<>(), new Random());
+    }
+
+    @Test
+    public void testSetTypeString() // Multi-cell set of strings round-trips.
+    {
+        testCollection(SetType.getInstance(UTF8Type.instance, true), testStrings, () -> new HashSet<>(), new Random());
+    }
+
+    @Test
+    public void testSetTypeLong() // Multi-cell set of longs round-trips.
+    {
+        testCollection(SetType.getInstance(LongType.instance, true), testLongs, () -> new HashSet<>(), new Random());
+    }
+
+    <T, CT extends Collection<T>> void testCollection(CollectionType<CT> tt, T[] values, Supplier<CT> gen, Random rand) // NOTE(review): rand is unused here — elements are picked deterministically via cnt; kept for signature symmetry with testMap.
+    {
+        int cnt = 0;
+        List<CT> tests = new ArrayList<>();
+        tests.add(gen.get()); // always include the empty collection
+        for (int c = 1; c <= 3; ++c) // collection sizes 1..3
+            for (int j = 0; j < 5; ++j) // 5 samples per size
+            {
+                CT l = gen.get();
+                for (int i = 0; i < c; ++i)
+                {
+                    T value = values[cnt++ % values.length];
+                    if (value != null) // nulls in the fixture array are simply skipped
+                        l.add(value);
+                }
+
+                tests.add(l);
+            }
+        testType(tt, tests);
+    }
+
+    @Test
+    public void testMapTypeStringLong() // Multi-cell string->long map (hash-ordered source) round-trips.
+    {
+        testMap(MapType.getInstance(UTF8Type.instance, LongType.instance, true), testStrings, testLongs, () -> new HashMap<>(), new Random());
+    }
+
+    @Test
+    public void testMapTypeStringLongTree() // Same as above but with a sorted (TreeMap) source ordering.
+    {
+        testMap(MapType.getInstance(UTF8Type.instance, LongType.instance, true), testStrings, testLongs, () -> new TreeMap<>(), new Random());
+    }
+
+    <K, V, M extends Map<K, V>> void testMap(MapType<K, V> tt, K[] keys, V[] values, Supplier<M> gen, Random rand) // Builds empty plus randomly-populated maps of sizes up to 3 and checks round-trips.
+    {
+        List<M> tests = new ArrayList<>();
+        tests.add(gen.get()); // always include the empty map
+        for (int c = 1; c <= 3; ++c) // map sizes up to 3 (duplicate keys may collapse entries)
+            for (int j = 0; j < 5; ++j) // 5 samples per size
+            {
+                M l = gen.get();
+                for (int i = 0; i < c; ++i)
+                {
+                    V value = values[rand.nextInt(values.length)];
+                    if (value != null) // null fixture values are skipped
+                        l.put(keys[rand.nextInt(keys.length)], value);
+                }
+
+                tests.add(l);
+            }
+        testType(tt, tests);
+    }
+
+    /*
+     * Wraps a (type, value) pair as a ByteComparable whose bytes come from type.asComparableBytes.
+     */
+    private ByteComparable typeToComparable(AbstractType<?> type, ByteBuffer value)
+    {
+        return new ByteComparable()
+        {
+            @Override
+            public ByteSource asComparableBytes(Version v)
+            {
+                return type.asComparableBytes(value, v);
+            }
+
+            @Override
+            public String toString() // human-readable form for failure messages
+            {
+                return type.getString(value);
+            }
+        };
+    }
+
+    public <T> void testType(AbstractType<T> type, Object[] values) // Array convenience overload; unchecked cast assumes callers pass values of type T.
+    {
+        testType(type, Iterables.transform(Arrays.asList(values), x -> (T) x));
+    }
+
+    public <T> void testType(AbstractType<? super T> type, Iterable<T> values) // Round-trips each value through byte-comparable form, then recurses once for the reversed type.
+    {
+        for (T i : values) {
+            ByteBuffer b = decomposeAndRandomPad(type, i);
+            logger.info("Value {} ({}) bytes {} ByteSource {}",
+                              safeStr(i),
+                              safeStr(type.getSerializer().toCQLLiteral(b)),
+                              safeStr(ByteBufferUtil.bytesToHex(b)),
+                              typeToComparable(type, b).byteComparableAsString(VERSION));
+            assertConvertsSame(type, i);
+        }
+        if (!type.isReversed()) // guard prevents infinite recursion: reversed types do not recurse again
+            testType(ReversedType.getInstance(type), values);
+    }
+
+    public <T> void testTypeBuffers(AbstractType<T> type, Object[] values) // Array convenience overload; unchecked cast assumes callers pass values of type T.
+    {
+        testTypeBuffers(type, Lists.transform(Arrays.asList(values), x -> (T) x));
+    }
+
+    public <T> void testTypeBuffers(AbstractType<T> type, List<T> values)
+    {
+        // Main difference with above is that we use type.compare instead of checking equals
+        testBuffers(type, Lists.transform(values, value -> decomposeAndRandomPad(type, value)));
+
+    }
+    public void testBuffers(AbstractType<?> type, List<ByteBuffer> values) // Logs each value's hex/byte-comparable form (best-effort), then asserts round-trips via type.compare.
+    {
+        try
+        {
+            for (ByteBuffer b : values) {
+                logger.info("Value {} bytes {} ByteSource {}",
+                            safeStr(type.getSerializer().toCQLLiteral(b)),
+                            safeStr(ByteBufferUtil.bytesToHex(b)),
+                            typeToComparable(type, b).byteComparableAsString(VERSION));
+            }
+        }
+        catch (UnsupportedOperationException e) // some serializers cannot produce a CQL literal
+        {
+            // Continue without listing values.
+        }
+
+        for (ByteBuffer i : values)
+            assertConvertsSameBuffers(type, i);
+    }
+
+    void assertConvertsSameBuffers(AbstractType<?> type, ByteBuffer b1) // Encode to byte-comparable, decode back, and require type.compare(original, decoded) == 0.
+    {
+        final ByteComparable bs1 = typeToComparable(type, b1);
+
+        ByteBuffer actual = type.fromComparableBytes(source(bs1), VERSION);
+        assertEquals(String.format("Failed compare(%s, converted %s (bytesource %s))",
+                                   ByteBufferUtil.bytesToHex(b1),
+                                   ByteBufferUtil.bytesToHex(actual),
+                                   bs1.byteComparableAsString(VERSION)),
+                     0,
+                     type.compare(b1, actual)); // compare, not equals: decoding may normalize the representation
+    }
+
+    public void testDecoratedKeys(IPartitioner type, List<ByteBuffer> values) // Round-trips each key buffer through the partitioner's decorated-key encoding.
+    {
+        for (ByteBuffer i : values)
+            assertConvertsSameDecoratedKeys(type, i);
+    }
+
+    void assertConvertsSameDecoratedKeys(IPartitioner type, ByteBuffer b1) // Decorated key rebuilt from its byte-comparable form must compare equal AND be equals() to the original.
+    {
+        DecoratedKey k1 = type.decorateKey(b1);
+        DecoratedKey actual = BufferDecoratedKey.fromByteComparable(k1, VERSION, type);
+
+        assertEquals(String.format("Failed compare(%s[%s bs %s], %s[%s bs %s])\npartitioner %s",
+                                   k1,
+                                   ByteBufferUtil.bytesToHex(b1),
+                                   k1.byteComparableAsString(VERSION),
+                                   actual,
+                                   ByteBufferUtil.bytesToHex(actual.getKey()),
+                                   actual.byteComparableAsString(VERSION),
+                                   type),
+                     0,
+                     k1.compareTo(actual));
+        assertEquals(String.format("Failed equals(%s[%s bs %s], %s[%s bs %s])\npartitioner %s",
+                                   k1,
+                                   ByteBufferUtil.bytesToHex(b1),
+                                   k1.byteComparableAsString(VERSION),
+                                   actual,
+                                   ByteBufferUtil.bytesToHex(actual.getKey()),
+                                   actual.byteComparableAsString(VERSION),
+                                   type),
+                     k1,
+                     actual);
+    }
+
+    static Object safeStr(Object i) // Renders a value for logging: hex for buffers, truncated to 100 chars, NULs made visible.
+    {
+        if (i == null)
+            return null;
+        if (i instanceof ByteBuffer)
+        {
+            ByteBuffer buf = (ByteBuffer) i;
+            i = ByteBufferUtil.bytesToHex(buf);
+        }
+        String s = i.toString();
+        if (s.length() > 100)
+            s = s.substring(0, 100) + "...";
+        return s.replaceAll("\0", "<0>"); // NUL bytes would otherwise be invisible in log output
+    }
+
+    public <T> void testDirect(Function<T, ByteSource> convertor, Function<ByteSource.Peekable, T> inverse, T[] values) // Round-trips values through a caller-supplied encode/decode pair; nulls are skipped.
+    {
+        for (T i : values) {
+            if (i == null)
+                continue;
+
+            logger.info("Value {} ByteSource {}\n",
+                              safeStr(i),
+                              convertor.apply(i));
+
+        }
+        for (T i : values)
+            if (i != null)
+                assertConvertsSame(convertor, inverse, i);
+    }
+
+    <T> void assertConvertsSame(Function<T, ByteSource> convertor, Function<ByteSource.Peekable, T> inverse, T v1) // encode -> decode must equal the original value
+    {
+        ByteComparable b1 = v -> convertor.apply(v1); // version parameter ignored: convertor fixes the encoding
+        T actual = inverse.apply(source(b1));
+        assertEquals(String.format("ByteSource %s", b1.byteComparableAsString(VERSION)), v1, actual);
+    }
+
+    <T> void assertConvertsSame(AbstractType<T> type, T v1) // decompose -> byte-comparable -> decode -> compose must yield an equal value
+    {
+        ByteBuffer b1 = decomposeAndRandomPad(type, v1);
+        final ByteComparable bc1 = typeToComparable(type, b1);
+        ByteBuffer convertedBuffer = type.fromComparableBytes(source(bc1), VERSION);
+        T actual = type.compose(convertedBuffer);
+
+        assertEquals(String.format("Failed equals %s(%s bs %s), got %s",
+                                   safeStr(v1),
+                                   ByteBufferUtil.bytesToHex(b1),
+                                   safeStr(bc1.byteComparableAsString(VERSION)),
+                                   safeStr(actual)),
+                     v1,
+                     actual);
+    }
+
+    <T> ByteBuffer decomposeAndRandomPad(AbstractType<T> type, T v) // Decomposes v and surrounds it with garbage bytes so position/limit handling is exercised.
+    {
+        ByteBuffer b = type.decompose(v);
+        Random rand = new Random(0); // fixed seed: the "random" padding is deterministic across runs
+        int padBefore = rand.nextInt(16);
+        int padAfter = rand.nextInt(16);
+        int paddedCapacity = b.remaining() + padBefore + padAfter;
+        ByteBuffer padded = allocateBuffer(paddedCapacity);
+        rand.ints(padBefore).forEach(x -> padded.put((byte) x));
+        padded.put(b.duplicate());
+        rand.ints(padAfter).forEach(x -> padded.put((byte) x));
+        padded.clear().limit(padded.capacity() - padAfter).position(padBefore); // window the buffer onto the real payload only
+        return padded;
+    }
+
+    protected ByteBuffer allocateBuffer(int paddedCapacity) // Overridable hook: subclasses may allocate direct buffers instead.
+    {
+        return ByteBuffer.allocate(paddedCapacity);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceInverseTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceInverseTest.java
new file mode 100644
index 0000000..391a8d3
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceInverseTest.java
@@ -0,0 +1,397 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.utils.*;
+import org.apache.cassandra.utils.memory.MemoryUtil;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntConsumer;
+import java.util.function.LongConsumer;
+import java.util.stream.*;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+@RunWith(Parameterized.class)
+public class ByteSourceInverseTest // Verifies ByteSourceInverse decoding is the exact inverse of ByteSource encoding, per byte-comparable version.
+{
+    private static final String ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()";
+
+    @Parameterized.Parameters(name = "version={0}")
+    public static Iterable<ByteComparable.Version> versions() // Currently only the OSS42 format is exercised.
+    {
+        return ImmutableList.of(ByteComparable.Version.OSS42);
+    }
+
+    private final ByteComparable.Version version;
+
+    public ByteSourceInverseTest(ByteComparable.Version version)
+    {
+        this.version = version;
+    }
+
+    @Test
+    public void testGetSignedInt() // Boundary values plus 1000 random ints must decode back to themselves.
+    {
+        IntConsumer intConsumer = initial ->
+        {
+            ByteSource byteSource = ByteSource.of(initial);
+            int decoded = ByteSourceInverse.getSignedInt(byteSource);
+            Assert.assertEquals(initial, decoded);
+        };
+
+        IntStream.of(Integer.MIN_VALUE, Integer.MIN_VALUE + 1,
+                     -256, -255, -128, -127, -1, 0, 1, 127, 128, 255, 256,
+                     Integer.MAX_VALUE - 1, Integer.MAX_VALUE)
+                 .forEach(intConsumer);
+        new Random().ints(1000)
+                    .forEach(intConsumer);
+    }
+
+    @Test
+    public void testNextInt() // Two consecutive ints can be read from one long-valued source; a third read must fail.
+    {
+        // The high and low 32 bits of this long differ only in the first and last bit (in the high 32 bits they are
+        // both 0s instead of 1s). The first bit difference will be negated by the bit flipping when writing down a
+        // fixed length signed number, so the only remaining difference will be in the last bit.
+        int hi = 0b0001_0010_0011_0100_0101_0110_0111_1000;
+        int lo = hi | 1 | 1 << 31;
+        long l1 = Integer.toUnsignedLong(hi) << 32 | Integer.toUnsignedLong(lo);
+
+        ByteSource byteSource = ByteSource.of(l1);
+        int i1 = ByteSourceInverse.getSignedInt(byteSource);
+        int i2 = ByteSourceInverse.getSignedInt(byteSource);
+        Assert.assertEquals(i1 + 1, i2);
+
+        try
+        {
+            ByteSourceInverse.getSignedInt(byteSource); // source is exhausted after two reads
+            Assert.fail();
+        }
+        catch (IllegalArgumentException e)
+        {
+            // Expected.
+        }
+
+        byteSource = ByteSource.of(l1); // fresh source: reads must be repeatable from the start
+        int iFirst = ByteSourceInverse.getSignedInt(byteSource);
+        Assert.assertEquals(i1, iFirst);
+        int iNext = ByteSourceInverse.getSignedInt(byteSource);
+        Assert.assertEquals(i2, iNext);
+    }
+
+    @Test
+    public void testGetSignedLong() // Boundary values plus 1000 random longs must decode back to themselves.
+    {
+        LongConsumer longConsumer = initial ->
+        {
+            ByteSource byteSource = ByteSource.of(initial);
+            long decoded = ByteSourceInverse.getSignedLong(byteSource);
+            Assert.assertEquals(initial, decoded);
+        };
+
+        LongStream.of(Long.MIN_VALUE, Long.MIN_VALUE + 1, Integer.MIN_VALUE - 1L,
+                      -256L, -255L, -128L, -127L, -1L, 0L, 1L, 127L, 128L, 255L, 256L,
+                      Integer.MAX_VALUE + 1L, Long.MAX_VALUE - 1, Long.MAX_VALUE)
+                  .forEach(longConsumer);
+        new Random().longs(1000)
+                    .forEach(longConsumer);
+    }
+
+    @Test
+    public void testGetSignedByte() // Exhaustive: every byte value must decode back to itself via ByteType's encoding.
+    {
+        Consumer<Byte> byteConsumer = boxedByte ->
+        {
+            byte initial = boxedByte;
+            ByteBuffer byteBuffer = ByteType.instance.decompose(initial);
+            ByteSource byteSource = ByteType.instance.asComparableBytes(byteBuffer, version);
+            byte decoded = ByteSourceInverse.getSignedByte(byteSource);
+            Assert.assertEquals(initial, decoded);
+        };
+
+        IntStream.range(Byte.MIN_VALUE, Byte.MAX_VALUE + 1)
+                 .forEach(byteInteger -> byteConsumer.accept((byte) byteInteger));
+    }
+
+    @Test
+    public void testGetSignedShort() // Exhaustive: every short value must decode back to itself via ShortType's encoding.
+    {
+        Consumer<Short> shortConsumer = boxedShort ->
+        {
+            short initial = boxedShort;
+            ByteBuffer shortBuffer = ShortType.instance.decompose(initial);
+            ByteSource byteSource = ShortType.instance.asComparableBytes(shortBuffer, version);
+            short decoded = ByteSourceInverse.getSignedShort(byteSource);
+            Assert.assertEquals(initial, decoded);
+        };
+
+        IntStream.range(Short.MIN_VALUE, Short.MAX_VALUE + 1)
+                 .forEach(shortInteger -> shortConsumer.accept((short) shortInteger));
+    }
+
+    @Test
+    public void testBadByteSourceForFixedLengthNumbers() // null and truncated sources must raise NPE / IllegalArgumentException respectively.
+    {
+        byte[] bytes = new byte[8];
+        new Random().nextBytes(bytes);
+        for (Map.Entry<String, Integer> entries : ImmutableMap.of("getSignedInt", 4,
+                  "getSignedLong", 8,
+                  "getSignedByte", 1,
+                  "getSignedShort", 2).entrySet()) // method name -> its encoded length in bytes
+        {
+            String methodName = entries.getKey();
+            int length = entries.getValue();
+            try
+            {
+                Method fixedLengthNumberMethod = ByteSourceInverse.class.getMethod(methodName, ByteSource.class); // reflection lets one loop cover all four decoders
+                ArrayList<ByteSource> sources = new ArrayList<>();
+                sources.add(null);
+                sources.add(ByteSource.EMPTY);
+                for (int i = 0; i < length; ++i)
+                    sources.add(ByteSource.fixedLength(bytes, 0, i)); // every strictly-too-short prefix
+                // Note: not testing invalid bytes (e.g. using the construction below) as they signify a programming
+                // error (throwing AssertionError) rather than something that could happen due to e.g. bad files.
+                //      ByteSource.withTerminatorLegacy(257, ByteSource.fixedLength(bytes, 0, length - 1));
+                for (ByteSource badSource : sources)
+                {
+                    try
+                    {
+                        fixedLengthNumberMethod.invoke(ByteSourceInverse.class, badSource);
+                        Assert.fail("Expected exception not thrown");
+                    }
+                    catch (Throwable maybe)
+                    {
+                        maybe = Throwables.unwrapped(maybe); // unwrap reflection's InvocationTargetException
+                        final String message = "Unexpected throwable " + maybe + " with cause " + maybe.getCause();
+                        if (badSource == null)
+                            Assert.assertTrue(message,
+                                              maybe instanceof NullPointerException);
+                        else
+                            Assert.assertTrue(message,
+                                              maybe instanceof IllegalArgumentException);
+                    }
+                }
+            }
+            catch (NoSuchMethodException e)
+            {
+                Assert.fail("Expected ByteSourceInverse to have method called " + methodName
+                            + " with a single parameter of type ByteSource");
+            }
+        }
+    }
+
+    @Test
+    public void testBadByteSourceForVariableLengthNumbers() // Same failure contract for the variable-length integer decoder.
+    {
+        for (long value : Arrays.asList(0L, 1L << 6, 1L << 13, 1L << 20, 1L << 27, 1L << 34, 1L << 41, 1L << 48, 1L << 55)) // one value per encoded length
+        {
+            Assert.assertEquals(value, ByteSourceInverse.getVariableLengthInteger(ByteSource.variableLengthInteger(value))); // sanity: intact source decodes fine
+
+            ArrayList<ByteSource> sources = new ArrayList<>();
+            sources.add(null);
+            sources.add(ByteSource.EMPTY);
+            int length = ByteComparable.length(version -> ByteSource.variableLengthInteger(value), ByteComparable.Version.OSS42);
+            for (int i = 0; i < length; ++i)
+                sources.add(ByteSource.cut(ByteSource.variableLengthInteger(value), i)); // every strictly-too-short prefix
+
+            for (ByteSource badSource : sources)
+            {
+                try
+                {
+                    ByteSourceInverse.getVariableLengthInteger(badSource);
+                    Assert.fail("Expected exception not thrown");
+                }
+                catch (Throwable maybe)
+                {
+                    maybe = Throwables.unwrapped(maybe);
+                    final String message = "Unexpected throwable " + maybe + " with cause " + maybe.getCause();
+                    if (badSource == null)
+                        Assert.assertTrue(message,
+                                          maybe instanceof NullPointerException);
+                    else
+                        Assert.assertTrue(message,
+                                          maybe instanceof IllegalArgumentException);
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testGetString() // Tricky fixed strings (empty, NULs, etc.) plus random alphanumerics round-trip; null maps to null.
+    {
+        Consumer<String> stringConsumer = initial ->
+        {
+            ByteSource.Peekable byteSource = initial == null ? null : ByteSource.peekable(ByteSource.of(initial, version));
+            String decoded = ByteSourceInverse.getString(byteSource);
+            Assert.assertEquals(initial, decoded);
+        };
+
+        Stream.of(null, "© 2018 DataStax", "", "\n", "\0", "\0\0", "\001", "0", "0\0", "00", "1")
+              .forEach(stringConsumer);
+
+        Random prng = new Random();
+        int stringLength = 10;
+        String random;
+        for (int i = 0; i < 1000; ++i)
+        {
+            random = newRandomAlphanumeric(prng, stringLength);
+            stringConsumer.accept(random);
+        }
+    }
+
+    private static String newRandomAlphanumeric(Random prng, int length) // Random string drawn from ALPHABET.
+    {
+        StringBuilder random = new StringBuilder(length);
+        for (int i = 0; i < length; ++i)
+            random.append(ALPHABET.charAt(prng.nextInt(ALPHABET.length())));
+        return random.toString();
+    }
+
+    @Test
+    public void testGetByteBuffer() // Escape-sequence edge cases and random bytes round-trip via buffer, array and off-heap sources alike.
+    {
+        for (Consumer<byte[]> byteArrayConsumer : Arrays.<Consumer<byte[]>>asList(initialBytes ->
+            {
+                ByteSource.Peekable byteSource = ByteSource.peekable(ByteSource.of(ByteBuffer.wrap(initialBytes), version)); // ByteBuffer-backed source
+                byte[] decodedBytes = ByteSourceInverse.getUnescapedBytes(byteSource);
+                Assert.assertArrayEquals(initialBytes, decodedBytes);
+            },
+            initialBytes ->
+            {
+                ByteSource.Peekable byteSource = ByteSource.peekable(ByteSource.of(initialBytes, version)); // byte[]-backed source
+                byte[] decodedBytes = ByteSourceInverse.getUnescapedBytes(byteSource);
+                Assert.assertArrayEquals(initialBytes, decodedBytes);
+            },
+            initialBytes ->
+            {
+                long address = MemoryUtil.allocate(initialBytes.length); // off-heap (native memory) source
+                try
+                {
+                    MemoryUtil.setBytes(address, initialBytes, 0, initialBytes.length);
+                    ByteSource.Peekable byteSource = ByteSource.peekable(ByteSource.ofMemory(address, initialBytes.length, version));
+                    byte[] decodedBytes = ByteSourceInverse.getUnescapedBytes(byteSource);
+                    Assert.assertArrayEquals(initialBytes, decodedBytes);
+                }
+                finally
+                {
+                    MemoryUtil.free(address); // always release the native allocation
+                }
+            }
+            ))
+        {
+            for (byte[] tricky : Arrays.asList(
+            // ESCAPE - leading, in the middle, trailing
+            new byte[]{ 0, 2, 3, 4, 5 }, new byte[]{ 1, 2, 0, 4, 5 }, new byte[]{ 1, 2, 3, 4, 0 },
+            // END_OF_STREAM/ESCAPED_0_DONE - leading, in the middle, trailing
+            new byte[]{ -1, 2, 3, 4, 5 }, new byte[]{ 1, 2, -1, 4, 5 }, new byte[]{ 1, 2, 3, 4, -1 },
+            // ESCAPED_0_CONT - leading, in the middle, trailing
+            new byte[]{ -2, 2, 3, 4, 5 }, new byte[]{ 1, 2, -2, 4, 5 }, new byte[]{ 1, 2, 3, 4, -2 },
+            // ESCAPE + ESCAPED_0_DONE - leading, in the middle, trailing
+            new byte[]{ 0, -1, 3, 4, 5 }, new byte[]{ 1, 0, -1, 4, 5 }, new byte[]{ 1, 2, 3, 0, -1 },
+            // ESCAPE + ESCAPED_0_CONT + ESCAPED_0_DONE - leading, in the middle, trailing
+            new byte[]{ 0, -2, -1, 4, 5 }, new byte[]{ 1, 0, -2, -1, 5 }, new byte[]{ 1, 2, 0, -2, -1 }))
+            {
+                byteArrayConsumer.accept(tricky);
+            }
+
+            byte[] bytes = new byte[1000];
+            Random prng = new Random();
+            for (int i = 0; i < 1000; ++i)
+            {
+                prng.nextBytes(bytes);
+                byteArrayConsumer.accept(bytes);
+            }
+
+            int stringLength = 10;
+            String random;
+            for (int i = 0; i < 1000; ++i)
+            {
+                random = newRandomAlphanumeric(prng, stringLength);
+                byteArrayConsumer.accept(random.getBytes(StandardCharsets.UTF_8));
+            }
+        }
+    }
+
+    @Test
+    public void testReadBytes() // readBytes output, replayed via fixedLength, must compare equal to a fresh encoding of the same value.
+    {
+        Map<Class<?>, Function<Object, ByteSource>> generatorPerType = new HashMap<>();
+        List<Object> originalValues = new ArrayList<>();
+        Random prng = new Random();
+
+        generatorPerType.put(String.class, s ->
+        {
+            String string = (String) s;
+            return ByteSource.of(string, version);
+        });
+        for (int i = 0; i < 100; ++i)
+            originalValues.add(newRandomAlphanumeric(prng, 10));
+
+        generatorPerType.put(Integer.class, i ->
+        {
+            Integer integer = (Integer) i;
+            return ByteSource.of(integer);
+        });
+        for (int i = 0; i < 100; ++i)
+            originalValues.add(prng.nextInt());
+
+        generatorPerType.put(Long.class, l ->
+        {
+            Long looong = (Long) l;
+            return ByteSource.of(looong);
+        });
+        for (int i = 0; i < 100; ++i)
+            originalValues.add(prng.nextLong());
+
+        generatorPerType.put(UUID.class, u ->
+        {
+            UUID uuid = (UUID) u;
+            ByteBuffer uuidBuffer = UUIDType.instance.decompose(uuid);
+            return UUIDType.instance.asComparableBytes(uuidBuffer, version);
+        });
+        for (int i = 0; i < 100; ++i)
+            originalValues.add(UUID.randomUUID());
+
+        for (Object value : originalValues)
+        {
+            Class<?> type = value.getClass();
+            Function<Object, ByteSource> generator = generatorPerType.get(type); // dispatch on runtime class of the fixture
+            ByteSource originalSource = generator.apply(value);
+            ByteSource originalSourceCopy = generator.apply(value); // second copy: sources are one-shot
+            byte[] bytes = ByteSourceInverse.readBytes(originalSource);
+            // The best way to test the read bytes seems to be to assert that just directly using them as a
+            // ByteSource (using ByteSource.fixedLength(byte[])) they compare as equal to another ByteSource obtained
+            // from the same original value.
+            int compare = ByteComparable.compare(v -> originalSourceCopy, v -> ByteSource.fixedLength(bytes), version);
+            Assert.assertEquals(0, compare);
+        }
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceSequenceTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceSequenceTest.java
new file mode 100644
index 0000000..aa7843b
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceSequenceTest.java
@@ -0,0 +1,784 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.function.Function;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.db.BufferClusteringBound;
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.CachedHashDecoratedKey;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.ClusteringPrefix;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.dht.LocalPartitioner;
+import org.apache.cassandra.utils.TimeUUID;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+@RunWith(Parameterized.class)
+public class ByteSourceSequenceTest
+{
+
+    private static final String ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()";
+
+    @Parameterized.Parameters(name = "version={0}")
+    public static Iterable<ByteComparable.Version> versions()
+    {
+        return ImmutableList.of(ByteComparable.Version.OSS42);
+    }
+
+    private final ByteComparable.Version version;
+
+    public ByteSourceSequenceTest(ByteComparable.Version version)
+    {
+        this.version = version;
+    }
+
+    @Test
+    public void testNullsSequence()
+    {
+        ByteSource.Peekable comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, null, null
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+    }
+
+    @Test
+    public void testNullsAndUnknownLengthsSequence()
+    {
+        ByteSource.Peekable comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, ByteSource.of("b", version), ByteSource.of("c", version)
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "b");
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "c");
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of("a", version), null, ByteSource.of("c", version)
+        ));
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "a");
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "c");
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of("a", version), ByteSource.of("b", version), null
+        ));
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "a");
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "b");
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of("a", version), null, null
+        ));
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "a");
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, null, ByteSource.of("c", version)
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getString, "c");
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+    }
+
+    private static void expectNextComponentNull(ByteSource.Peekable comparableBytes)
+    {
+        // We expect null-signifying separator, followed by a null ByteSource component
+        ByteSource.Peekable next = ByteSourceInverse.nextComponentSource(comparableBytes);
+        assertNull(next);
+    }
+
+    private static <T> void expectNextComponentValue(ByteSource.Peekable comparableBytes,
+                                                     Function<ByteSource.Peekable, T> decoder,
+                                                     T expected)
+    {
+        // We expect a regular separator, followed by a ByteSource component corresponding to the expected value
+        ByteSource.Peekable next = ByteSourceInverse.nextComponentSource(comparableBytes);
+        assertNotNull(next);
+        T decoded = decoder.apply(next);
+        assertEquals(expected, decoded);
+    }
+
+    @Test
+    public void testNullsAndKnownLengthsSequence()
+    {
+        int intValue = 42;
+        BigInteger varintValue = BigInteger.valueOf(2018L);
+        ByteSource.Peekable comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, ByteSource.of(intValue), varintToByteSource(varintValue)
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getSignedInt, intValue);
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(intValue), null, varintToByteSource(varintValue)
+        ));
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getSignedInt, intValue);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(intValue), varintToByteSource(varintValue), null
+        ));
+        expectNextComponentValue(comparableBytes, ByteSourceInverse::getSignedInt, intValue);
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, null, varintToByteSource(varintValue)
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                null, varintToByteSource(varintValue), null
+        ));
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                varintToByteSource(varintValue), null, null
+        ));
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        Boolean boolValue = new Random().nextBoolean();
+        ByteSource boolSource = BooleanType.instance.asComparableBytes(BooleanType.instance.decompose(boolValue), version);
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                varintToByteSource(varintValue), boolSource, null
+        ));
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        expectNextComponentValue(comparableBytes, BooleanType.instance, boolValue);
+        expectNextComponentNull(comparableBytes);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+
+        boolSource = BooleanType.instance.asComparableBytes(BooleanType.instance.decompose(boolValue), version);
+        comparableBytes = ByteSource.peekable(ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                varintToByteSource(varintValue), null, boolSource
+        ));
+        expectNextComponentValue(comparableBytes, VARINT, varintValue);
+        expectNextComponentNull(comparableBytes);
+        expectNextComponentValue(comparableBytes, BooleanType.instance, boolValue);
+        assertEquals(ByteSource.TERMINATOR, comparableBytes.next());
+    }
+
+    @Test
+    public void testOptionalSignedFixedLengthTypesSequence()
+    {
+        Random prng = new Random();
+        String randomString = newRandomAlphanumeric(prng, 10);
+        byte randomByte = (byte) prng.nextInt();
+        short randomShort = (short) prng.nextInt();
+        int randomInt = prng.nextInt();
+        long randomLong = prng.nextLong();
+        BigInteger randomVarint = BigInteger.probablePrime(80, prng);
+
+        Map<AbstractType<?>, ByteBuffer> valuesByType = new HashMap<AbstractType<?>, ByteBuffer>()
+        {{
+            put(ByteType.instance, ByteType.instance.decompose(randomByte));
+            put(ShortType.instance, ShortType.instance.decompose(randomShort));
+            put(SimpleDateType.instance, SimpleDateType.instance.decompose(randomInt));
+            put(TimeType.instance, TimeType.instance.decompose(randomLong));
+        }};
+
+        for (Map.Entry<AbstractType<?>, ByteBuffer> entry : valuesByType.entrySet())
+        {
+            AbstractType<?> type = entry.getKey();
+            ByteBuffer value = entry.getValue();
+
+            ByteSource byteSource = type.asComparableBytes(value, version);
+            ByteSource.Peekable sequence = ByteSource.peekable(ByteSource.withTerminator(
+                    ByteSource.TERMINATOR,
+                    ByteSource.of(randomString, version), byteSource, varintToByteSource(randomVarint)
+            ));
+            expectNextComponentValue(sequence, ByteSourceInverse::getString, randomString);
+            expectNextComponentValue(sequence, type, value);
+            expectNextComponentValue(sequence, VARINT, randomVarint);
+            assertEquals(ByteSource.TERMINATOR, sequence.next());
+
+            byteSource = type.asComparableBytes(type.decompose(null), version);
+            sequence = ByteSource.peekable(ByteSource.withTerminator(
+                    ByteSource.TERMINATOR,
+                    ByteSource.of(randomString, version), byteSource, varintToByteSource(randomVarint)
+            ));
+            expectNextComponentValue(sequence, ByteSourceInverse::getString, randomString);
+            expectNextComponentNull(sequence);
+            expectNextComponentValue(sequence, VARINT, randomVarint);
+            assertEquals(ByteSource.TERMINATOR, sequence.next());
+        }
+    }
+
+    private ByteSource varintToByteSource(BigInteger value)
+    {
+        ByteBuffer valueByteBuffer = VARINT.decompose(value);
+        return VARINT.asComparableBytes(valueByteBuffer, version);
+    }
+
+    private static final UTF8Type UTF8 = UTF8Type.instance;
+    private static final DecimalType DECIMAL = DecimalType.instance;
+    private static final IntegerType VARINT = IntegerType.instance;
+
+    // A regular comparator using the natural ordering for all types.
+    private static final ClusteringComparator COMP = new ClusteringComparator(Arrays.asList(
+            UTF8,
+            DECIMAL,
+            VARINT
+    ));
+    // A comparator that reverses the ordering for the first unknown length type
+    private static final ClusteringComparator COMP_REVERSED_UNKNOWN_LENGTH = new ClusteringComparator(Arrays.asList(
+            ReversedType.getInstance(UTF8),
+            DECIMAL,
+            VARINT
+    ));
+    // A comparator that reverses the ordering for the second unknown length type
+    private static final ClusteringComparator COMP_REVERSED_UNKNOWN_LENGTH_2 = new ClusteringComparator(Arrays.asList(
+            UTF8,
+            ReversedType.getInstance(DECIMAL),
+            VARINT
+    ));
+    // A comparator that reverses the ordering for the sole known/computable length type
+    private static final ClusteringComparator COMP_REVERSED_KNOWN_LENGTH = new ClusteringComparator(Arrays.asList(
+            UTF8,
+            DECIMAL,
+            ReversedType.getInstance(VARINT)
+    ));
+    // A comparator that reverses the ordering for all types
+    private static final ClusteringComparator COMP_ALL_REVERSED = new ClusteringComparator(Arrays.asList(
+            ReversedType.getInstance(UTF8),
+            ReversedType.getInstance(DECIMAL),
+            ReversedType.getInstance(VARINT)
+    ));
+
+    @Test
+    public void testClusteringPrefixBoundNormalAndReversed()
+    {
+        String stringValue = "Lorem ipsum dolor sit amet";
+        BigDecimal decimalValue = BigDecimal.valueOf(123456789, 20);
+        BigInteger varintValue = BigInteger.valueOf(2018L);
+
+        // Create some non-null clustering key values that will be encoded and decoded to byte-ordered representation
+        // with different types of clustering comparators (and in other tests with different types of prefixes).
+        ByteBuffer[] clusteringKeyValues = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                VARINT.decompose(varintValue)
+        };
+
+        for (ClusteringPrefix.Kind prefixKind : ClusteringPrefix.Kind.values())
+        {
+            if (prefixKind.isBoundary())
+                continue;
+
+            ClusteringPrefix prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            // Use the regular comparator.
+            ByteSource.Peekable comparableBytes = ByteSource.peekable(COMP.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentValue(comparableBytes, DECIMAL, decimalValue);
+            expectNextComponentValue(comparableBytes, VARINT, varintValue);
+
+            prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            // Use the comparator reversing the ordering for the first unknown length type.
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_UNKNOWN_LENGTH.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(UTF8), stringValue);
+            expectNextComponentValue(comparableBytes, DECIMAL, decimalValue);
+            expectNextComponentValue(comparableBytes, VARINT, varintValue);
+
+            prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            // Use the comparator reversing the ordering for the second unknown length type.
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_UNKNOWN_LENGTH_2.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(DECIMAL), decimalValue);
+            expectNextComponentValue(comparableBytes, VARINT, varintValue);
+
+            prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            // Use the comparator reversing the ordering for the known/computable length type.
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_KNOWN_LENGTH.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentValue(comparableBytes, DECIMAL, decimalValue);
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(VARINT), varintValue);
+
+            prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            // Use the all-reversing comparator.
+            comparableBytes = ByteSource.peekable(COMP_ALL_REVERSED.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(UTF8), stringValue);
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(DECIMAL), decimalValue);
+            expectNextComponentValue(comparableBytes, ReversedType.getInstance(VARINT), varintValue);
+        }
+    }
+
+    @Test
+    public void testClusteringPrefixBoundNulls()
+    {
+        String stringValue = "Lorem ipsum dolor sit amet";
+        BigDecimal decimalValue = BigDecimal.valueOf(123456789, 20);
+        BigInteger varintValue = BigInteger.valueOf(2018L);
+
+        // Create clustering key values where the component for an unknown length type is null.
+        ByteBuffer[] unknownLengthNull = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(null),
+                VARINT.decompose(varintValue)
+        };
+        // Create clustering key values where the component for a known/computable length type is null.
+        ByteBuffer[] knownLengthNull = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                VARINT.decompose(null)
+        };
+
+        for (ClusteringPrefix.Kind prefixKind : ClusteringPrefix.Kind.values())
+        {
+            if (prefixKind.isBoundary())
+                continue;
+
+            // Test the decoding of a null component of a non-reversed unknown length type.
+            ClusteringPrefix prefix = BufferClusteringBound.create(prefixKind, unknownLengthNull);
+            ByteSource.Peekable comparableBytes = ByteSource.peekable(COMP.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentNull(comparableBytes);
+            expectNextComponentValue(comparableBytes, VARINT, varintValue);
+            // Test the decoding of a null component of a reversed unknown length type.
+            prefix = BufferClusteringBound.create(prefixKind, unknownLengthNull);
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_UNKNOWN_LENGTH_2.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentNull(comparableBytes);
+            expectNextComponentValue(comparableBytes, VARINT, varintValue);
+
+            // Test the decoding of a null component of a non-reversed known/computable length type.
+            prefix = BufferClusteringBound.create(prefixKind, knownLengthNull);
+            comparableBytes = ByteSource.peekable(COMP.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentValue(comparableBytes, DECIMAL, decimalValue);
+            expectNextComponentNull(comparableBytes);
+            // Test the decoding of a null component of a reversed known/computable length type.
+            prefix = BufferClusteringBound.create(prefixKind, knownLengthNull);
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_KNOWN_LENGTH.asByteComparable(prefix).asComparableBytes(version));
+            expectNextComponentValue(comparableBytes, UTF8, stringValue);
+            expectNextComponentValue(comparableBytes, DECIMAL, decimalValue);
+            expectNextComponentNull(comparableBytes);
+        }
+    }
+
+    private <T> void expectNextComponentValue(ByteSource.Peekable comparableBytes,
+                                              AbstractType<T> type,
+                                              T expected)
+    {
+        // We expect a regular separator, followed by a ByteSource component corresponding to the expected value
+        ByteSource.Peekable next = ByteSourceInverse.nextComponentSource(comparableBytes);
+        T decoded = type.compose(type.fromComparableBytes(next, version));
+        assertEquals(expected, decoded);
+    }
+
+    private void expectNextComponentValue(ByteSource.Peekable comparableBytes,
+                                          AbstractType<?> type,
+                                          ByteBuffer expected)
+    {
+        // We expect a regular separator, followed by a ByteSource component corresponding to the expected value
+        ByteSource.Peekable next = ByteSourceInverse.nextComponentSource(comparableBytes);
+        assertEquals(expected, type.fromComparableBytes(next, version));
+    }
+
+    @Test
+    public void testGetBoundFromPrefixTerminator()
+    {
+        String stringValue = "Lorem ipsum dolor sit amet";
+        BigDecimal decimalValue = BigDecimal.valueOf(123456789, 20);
+        BigInteger varintValue = BigInteger.valueOf(2018L);
+
+        ByteBuffer[] clusteringKeyValues = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                VARINT.decompose(varintValue)
+        };
+        ByteBuffer[] nullValueBeforeTerminator = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                VARINT.decompose(null)
+        };
+
+        for (ClusteringPrefix.Kind prefixKind : ClusteringPrefix.Kind.values())
+        {
+            // NOTE dimitar.dimitrov I assume there's a sensible explanation why does STATIC_CLUSTERING use a custom
+            // terminator that's not one of the common separator values, but I haven't spent enough time to get it.
+            if (prefixKind.isBoundary())
+                continue;
+
+            // Test that the read terminator value is exactly the encoded value of this prefix' bound.
+            ClusteringPrefix prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            ByteSource.Peekable comparableBytes = ByteSource.peekable(COMP.asByteComparable(prefix).asComparableBytes(version));
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            ByteSourceInverse.getString(comparableBytes);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            DECIMAL.fromComparableBytes(comparableBytes, version);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            VARINT.fromComparableBytes(comparableBytes, version);
+            // Expect the last separator (i.e. the terminator) to be the one specified by the prefix kind.
+            assertEquals(prefixKind.asByteComparableValue(version), comparableBytes.next());
+
+            // Test that the read terminator value is exactly the encoded value of this prefix' bound, when the
+            // terminator is preceded by a null value.
+            prefix = BufferClusteringBound.create(prefixKind, nullValueBeforeTerminator);
+            comparableBytes = ByteSource.peekable(COMP.asByteComparable(prefix).asComparableBytes(version));
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            ByteSourceInverse.getString(comparableBytes);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            DECIMAL.fromComparableBytes(comparableBytes, version);
+            // Expect null-signifying separator here.
+            assertEquals(ByteSource.NEXT_COMPONENT_EMPTY, comparableBytes.next());
+            // No varint to read
+            // Expect the last separator (i.e. the terminator) to be the one specified by the prefix kind.
+            assertEquals(prefixKind.asByteComparableValue(version), comparableBytes.next());
+
+            // Test that the read terminator value is exactly the encoded value of this prefix' bound, when the
+            // terminator is preceded by a reversed null value.
+            prefix = BufferClusteringBound.create(prefixKind, nullValueBeforeTerminator);
+            // That's the comparator that will reverse the ordering of the type of the last value in the prefix (the
+            // one before the terminator). In other tests we're more interested in the fact that values of this type
+            // have known/computable length, which is why we've named it so...
+            comparableBytes = ByteSource.peekable(COMP_REVERSED_KNOWN_LENGTH.asByteComparable(prefix).asComparableBytes(version));
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            ByteSourceInverse.getString(comparableBytes);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            DECIMAL.fromComparableBytes(comparableBytes, version);
+            // Expect reversed null-signifying separator here.
+            assertEquals(ByteSource.NEXT_COMPONENT_EMPTY_REVERSED, comparableBytes.next());
+            // No varint to read
+            // Expect the last separator (i.e. the terminator) to be the one specified by the prefix kind.
+            assertEquals(prefixKind.asByteComparableValue(version), comparableBytes.next());
+        }
+    }
+
+    @Test
+    public void testReversedTypesInClusteringKey()
+    {
+        String stringValue = "Lorem ipsum dolor sit amet";
+        BigDecimal decimalValue = BigDecimal.valueOf(123456789, 20);
+
+        AbstractType<String> reversedStringType = ReversedType.getInstance(UTF8);
+        AbstractType<BigDecimal> reversedDecimalType = ReversedType.getInstance(DECIMAL);
+
+        final ClusteringComparator comparator = new ClusteringComparator(Arrays.asList(
+                // unknown length type
+                UTF8,
+                // known length type
+                DECIMAL,
+                // reversed unknown length type
+                reversedStringType,
+                // reversed known length type
+                reversedDecimalType
+        ));
+        ByteBuffer[] clusteringKeyValues = new ByteBuffer[] {
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue)
+        };
+
+        final ClusteringComparator comparator2 = new ClusteringComparator(Arrays.asList(
+                // known length type
+                DECIMAL,
+                // unknown length type
+                UTF8,
+                // reversed known length type
+                reversedDecimalType,
+                // reversed unknown length type
+                reversedStringType
+        ));
+        ByteBuffer[] clusteringKeyValues2 = new ByteBuffer[] {
+                DECIMAL.decompose(decimalValue),
+                UTF8.decompose(stringValue),
+                DECIMAL.decompose(decimalValue),
+                UTF8.decompose(stringValue)
+        };
+
+        for (ClusteringPrefix.Kind prefixKind : ClusteringPrefix.Kind.values())
+        {
+            if (prefixKind.isBoundary())
+                continue;
+
+            ClusteringPrefix prefix = BufferClusteringBound.create(prefixKind, clusteringKeyValues);
+            ByteSource.Peekable comparableBytes = ByteSource.peekable(comparator.asByteComparable(prefix).asComparableBytes(version));
+
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            assertEquals(getComponentValue(UTF8, comparableBytes), stringValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            assertEquals(getComponentValue(DECIMAL, comparableBytes), decimalValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            assertEquals(getComponentValue(reversedStringType, comparableBytes), stringValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+            assertEquals(getComponentValue(reversedDecimalType, comparableBytes), decimalValue);
+
+            assertEquals(prefixKind.asByteComparableValue(version), comparableBytes.next());
+            assertEquals(ByteSource.END_OF_STREAM, comparableBytes.next());
+
+            ClusteringPrefix prefix2 = BufferClusteringBound.create(prefixKind, clusteringKeyValues2);
+            ByteSource.Peekable comparableBytes2 = ByteSource.peekable(comparator2.asByteComparable(prefix2).asComparableBytes(version));
+
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes2.next());
+            assertEquals(getComponentValue(DECIMAL, comparableBytes2), decimalValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes2.next());
+            assertEquals(getComponentValue(UTF8, comparableBytes2), stringValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes2.next());
+            assertEquals(getComponentValue(reversedDecimalType, comparableBytes2), decimalValue);
+            assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes2.next());
+            assertEquals(getComponentValue(reversedStringType, comparableBytes2), stringValue);
+
+            assertEquals(prefixKind.asByteComparableValue(version), comparableBytes2.next());
+            assertEquals(ByteSource.END_OF_STREAM, comparableBytes2.next());
+        }
+    }
+
+    private <T extends AbstractType<E>, E> E getComponentValue(T type, ByteSource.Peekable comparableBytes)
+    {
+        return type.compose(type.fromComparableBytes(comparableBytes, version));
+    }
+
+    @Test
+    public void testReadingNestedSequence_Simple()
+    {
+        String padding1 = "A string";
+        String padding2 = "Another string";
+
+        BigInteger varint1 = BigInteger.valueOf(0b10000000);
+        BigInteger varint2 = BigInteger.valueOf(1 >> 30);
+        BigInteger varint3 = BigInteger.valueOf(0x10000000L);
+        BigInteger varint4 = BigInteger.valueOf(Long.MAX_VALUE);
+
+        String string1 = "Testing byte sources";
+        String string2 = "is neither easy nor fun;";
+        String string3 = "But do it we must.";
+        String string4 = "— DataStax, 2018";
+
+        MapType<BigInteger, String> varintStringMapType = MapType.getInstance(VARINT, UTF8, false);
+        Map<BigInteger, String> varintStringMap = new TreeMap<>();
+        varintStringMap.put(varint1, string1);
+        varintStringMap.put(varint2, string2);
+        varintStringMap.put(varint3, string3);
+        varintStringMap.put(varint4, string4);
+
+        ByteSource sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(padding1, version),
+                varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version),
+                ByteSource.of(padding2, version)
+        );
+        ByteSource.Peekable comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+        sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version),
+                ByteSource.of(padding1, version),
+                ByteSource.of(padding2, version)
+        );
+        comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+        sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(padding1, version),
+                ByteSource.of(padding2, version),
+                varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version)
+        );
+        comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
+
+        MapType<String, BigInteger> stringVarintMapType = MapType.getInstance(UTF8, VARINT, false);
+        Map<String, BigInteger> stringVarintMap = new HashMap<>();
+        stringVarintMap.put(string1, varint1);
+        stringVarintMap.put(string2, varint2);
+        stringVarintMap.put(string3, varint3);
+        stringVarintMap.put(string4, varint4);
+
+        sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(padding1, version),
+                stringVarintMapType.asComparableBytes(stringVarintMapType.decompose(stringVarintMap), version),
+                ByteSource.of(padding2, version)
+        );
+        comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(stringVarintMapType, comparableBytes), stringVarintMap);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+
+        MapType<String, String> stringStringMapType = MapType.getInstance(UTF8, UTF8, false);
+        Map<String, String> stringStringMap = new HashMap<>();
+        stringStringMap.put(string1, string4);
+        stringStringMap.put(string2, string3);
+        stringStringMap.put(string3, string2);
+        stringStringMap.put(string4, string1);
+
+        sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(padding1, version),
+                stringStringMapType.asComparableBytes(stringStringMapType.decompose(stringStringMap), version),
+                ByteSource.of(padding2, version)
+        );
+        comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(stringStringMapType, comparableBytes), stringStringMap);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+
+        MapType<BigInteger, BigInteger> varintVarintMapType = MapType.getInstance(VARINT, VARINT, false);
+        Map<BigInteger, BigInteger> varintVarintMap = new HashMap<>();
+        varintVarintMap.put(varint1, varint4);
+        varintVarintMap.put(varint2, varint3);
+        varintVarintMap.put(varint3, varint2);
+        varintVarintMap.put(varint4, varint1);
+
+        sequence = ByteSource.withTerminator(
+                ByteSource.TERMINATOR,
+                ByteSource.of(padding1, version),
+                varintVarintMapType.asComparableBytes(varintVarintMapType.decompose(varintVarintMap), version),
+                ByteSource.of(padding2, version)
+        );
+        comparableBytes = ByteSource.peekable(sequence);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(varintVarintMapType, comparableBytes), varintVarintMap);
+        assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
+        assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
+    }
+
+    @Test
+    public void testReadingNestedSequence_DecoratedKey()
+    {
+        Random prng = new Random();
+
+        MapType<String, BigDecimal> stringDecimalMapType = MapType.getInstance(UTF8, DECIMAL, false);
+        Map<String, BigDecimal> stringDecimalMap = new HashMap<>();
+        for (int i = 0; i < 4; ++i)
+            stringDecimalMap.put(newRandomAlphanumeric(prng, 10), BigDecimal.valueOf(prng.nextDouble()));
+        ByteBuffer key = stringDecimalMapType.decompose(stringDecimalMap);
+        testDecodingKeyWithLocalPartitionerForType(key, stringDecimalMapType);
+
+        MapType<BigDecimal, String> decimalStringMapType = MapType.getInstance(DECIMAL, UTF8, false);
+        Map<BigDecimal, String> decimalStringMap = new HashMap<>();
+        for (int i = 0; i < 4; ++i)
+            decimalStringMap.put(BigDecimal.valueOf(prng.nextDouble()), newRandomAlphanumeric(prng, 10));
+        key = decimalStringMapType.decompose(decimalStringMap);
+        testDecodingKeyWithLocalPartitionerForType(key, decimalStringMapType);
+
+        if (version != ByteComparable.Version.LEGACY)
+        {
+            CompositeType stringDecimalCompType = CompositeType.getInstance(UTF8, DECIMAL);
+            key = stringDecimalCompType.decompose(newRandomAlphanumeric(prng, 10), BigDecimal.valueOf(prng.nextDouble()));
+            testDecodingKeyWithLocalPartitionerForType(key, stringDecimalCompType);
+
+            CompositeType decimalStringCompType = CompositeType.getInstance(DECIMAL, UTF8);
+            key = decimalStringCompType.decompose(BigDecimal.valueOf(prng.nextDouble()), newRandomAlphanumeric(prng, 10));
+            testDecodingKeyWithLocalPartitionerForType(key, decimalStringCompType);
+
+            DynamicCompositeType dynamicCompType = DynamicCompositeType.getInstance(DynamicCompositeTypeTest.aliases);
+            key = DynamicCompositeTypeTest.createDynamicCompositeKey(
+                    newRandomAlphanumeric(prng, 10), TimeUUID.Generator.nextTimeAsUUID(), 42, true, false);
+            testDecodingKeyWithLocalPartitionerForType(key, dynamicCompType);
+
+            key = DynamicCompositeTypeTest.createDynamicCompositeKey(
+                    newRandomAlphanumeric(prng, 10), TimeUUID.Generator.nextTimeAsUUID(), 42, true, true);
+            testDecodingKeyWithLocalPartitionerForType(key, dynamicCompType);
+        }
+    }
+
+    private static String newRandomAlphanumeric(Random prng, int length)
+    {
+        StringBuilder random = new StringBuilder(length);
+        for (int i = 0; i < length; ++i)
+            random.append(ALPHABET.charAt(prng.nextInt(ALPHABET.length())));
+        return random.toString();
+    }
+
+    private <T> void testDecodingKeyWithLocalPartitionerForType(ByteBuffer key, AbstractType<T> type)
+    {
+        IPartitioner partitioner = new LocalPartitioner(type);
+        CachedHashDecoratedKey initial = (CachedHashDecoratedKey) partitioner.decorateKey(key);
+        BufferDecoratedKey base = BufferDecoratedKey.fromByteComparable(initial, version, partitioner);
+        CachedHashDecoratedKey decoded = new CachedHashDecoratedKey(base.getToken(), base.getKey());
+        Assert.assertEquals(initial, decoded);
+    }
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceTestBase.java b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceTestBase.java
new file mode 100644
index 0000000..90463f6
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/ByteSourceTestBase.java
@@ -0,0 +1,513 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+
+import com.google.common.base.Throwables;
+
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.BooleanType;
+import org.apache.cassandra.db.marshal.DecimalType;
+import org.apache.cassandra.db.marshal.DoubleType;
+import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.db.marshal.IntegerType;
+import org.apache.cassandra.db.marshal.UTF8Type;
+import org.apache.cassandra.utils.TimeUUID;
+
+public class ByteSourceTestBase
+{
+    String[] testStrings = new String[]{ "", "\0", "\0\0", "\001", "A\0\0B", "A\0B\0", "0", "0\0", "00", "1", "\377" };
+    Integer[] testInts = new Integer[]{ null,
+                                        Integer.MIN_VALUE,
+                                        Integer.MIN_VALUE + 1,
+                                        -256,
+                                        -255,
+                                        -128,
+                                        -127,
+                                        -64,
+                                        -63,
+                                        -1,
+                                        0,
+                                        1,
+                                        63,
+                                        64,
+                                        127,
+                                        128,
+                                        255,
+                                        256,
+                                        Integer.MAX_VALUE - 1,
+                                        Integer.MAX_VALUE };
+    Byte[] testBytes = new Byte[]{ -128, -127, -1, 0, 1, 127 };
+    Short[] testShorts = new Short[]{ Short.MIN_VALUE,
+                                      Short.MIN_VALUE + 1,
+                                      -256,
+                                      -255,
+                                      -128,
+                                      -127,
+                                      -65,
+                                      -64,
+                                      -63,
+                                      -1,
+                                      0,
+                                      1,
+                                      127,
+                                      128,
+                                      255,
+                                      256,
+                                      Short.MAX_VALUE - 1,
+                                      Short.MAX_VALUE };
+    Long[] testLongs = new Long[]{ null,
+                                   Long.MIN_VALUE,
+                                   Long.MIN_VALUE + 1,
+                                   Integer.MIN_VALUE - 1L,
+                                   -256L,
+                                   -255L,
+                                   -128L,
+                                   -127L,
+                                   -65L,
+                                   -64L,
+                                   -63L,
+                                   -1L,
+                                   0L,
+                                   1L,
+                                   Integer.MAX_VALUE + 1L,
+                                   Long.MAX_VALUE - 1,
+                                   Long.MAX_VALUE,
+
+                                   (1L << 1) - 1,
+                                   (1L << 1),
+                                   (1L << 2) - 1,
+                                   (1L << 2),
+                                   (1L << 3) - 1,
+                                   (1L << 3),
+                                   (1L << 4) - 1,
+                                   (1L << 4),
+                                   (1L << 5) - 1,
+                                   (1L << 5),
+                                   (1L << 6) - 1,
+                                   (1L << 6),
+                                   (1L << 7) - 1,
+                                   (1L << 7),
+                                   (1L << 8) - 1,
+                                   (1L << 8),
+                                   (1L << 9) - 1,
+                                   (1L << 9),
+                                   (1L << 10) - 1,
+                                   (1L << 10),
+                                   (1L << 11) - 1,
+                                   (1L << 11),
+                                   (1L << 12) - 1,
+                                   (1L << 12),
+                                   (1L << 13) - 1,
+                                   (1L << 13),
+                                   (1L << 14) - 1,
+                                   (1L << 14),
+                                   (1L << 15) - 1,
+                                   (1L << 15),
+                                   (1L << 16) - 1,
+                                   (1L << 16),
+                                   (1L << 17) - 1,
+                                   (1L << 17),
+                                   (1L << 18) - 1,
+                                   (1L << 18),
+                                   (1L << 19) - 1,
+                                   (1L << 19),
+                                   (1L << 20) - 1,
+                                   (1L << 20),
+                                   (1L << 21) - 1,
+                                   (1L << 21),
+                                   (1L << 22) - 1,
+                                   (1L << 22),
+                                   (1L << 23) - 1,
+                                   (1L << 23),
+                                   (1L << 24) - 1,
+                                   (1L << 24),
+                                   (1L << 25) - 1,
+                                   (1L << 25),
+                                   (1L << 26) - 1,
+                                   (1L << 26),
+                                   (1L << 27) - 1,
+                                   (1L << 27),
+                                   (1L << 28) - 1,
+                                   (1L << 28),
+                                   (1L << 29) - 1,
+                                   (1L << 29),
+                                   (1L << 30) - 1,
+                                   (1L << 30),
+                                   (1L << 31) - 1,
+                                   (1L << 31),
+                                   (1L << 32) - 1,
+                                   (1L << 32),
+                                   (1L << 33) - 1,
+                                   (1L << 33),
+                                   (1L << 34) - 1,
+                                   (1L << 34),
+                                   (1L << 35) - 1,
+                                   (1L << 35),
+                                   (1L << 36) - 1,
+                                   (1L << 36),
+                                   (1L << 37) - 1,
+                                   (1L << 37),
+                                   (1L << 38) - 1,
+                                   (1L << 38),
+                                   (1L << 39) - 1,
+                                   (1L << 39),
+                                   (1L << 40) - 1,
+                                   (1L << 40),
+                                   (1L << 41) - 1,
+                                   (1L << 41),
+                                   (1L << 42) - 1,
+                                   (1L << 42),
+                                   (1L << 43) - 1,
+                                   (1L << 43),
+                                   (1L << 44) - 1,
+                                   (1L << 44),
+                                   (1L << 45) - 1,
+                                   (1L << 45),
+                                   (1L << 46) - 1,
+                                   (1L << 46),
+                                   (1L << 47) - 1,
+                                   (1L << 47),
+                                   (1L << 48) - 1,
+                                   (1L << 48),
+                                   (1L << 49) - 1,
+                                   (1L << 49),
+                                   (1L << 50) - 1,
+                                   (1L << 50),
+                                   (1L << 51) - 1,
+                                   (1L << 51),
+                                   (1L << 52) - 1,
+                                   (1L << 52),
+                                   (1L << 53) - 1,
+                                   (1L << 53),
+                                   (1L << 54) - 1,
+                                   (1L << 54),
+                                   (1L << 55) - 1,
+                                   (1L << 55),
+                                   (1L << 56) - 1,
+                                   (1L << 56),
+                                   (1L << 57) - 1,
+                                   (1L << 57),
+                                   (1L << 58) - 1,
+                                   (1L << 58),
+                                   (1L << 59) - 1,
+                                   (1L << 59),
+                                   (1L << 60) - 1,
+                                   (1L << 60),
+                                   (1L << 61) - 1,
+                                   (1L << 61),
+                                   (1L << 62) - 1,
+                                   (1L << 62),
+                                   (1L << 63) - 1,
+
+                                   ~((1L << 1) - 1),
+                                   ~((1L << 1)),
+                                   ~((1L << 2) - 1),
+                                   ~((1L << 2)),
+                                   ~((1L << 3) - 1),
+                                   ~((1L << 3)),
+                                   ~((1L << 4) - 1),
+                                   ~((1L << 4)),
+                                   ~((1L << 5) - 1),
+                                   ~((1L << 5)),
+                                   ~((1L << 6) - 1),
+                                   ~((1L << 6)),
+                                   ~((1L << 7) - 1),
+                                   ~((1L << 7)),
+                                   ~((1L << 8) - 1),
+                                   ~((1L << 8)),
+                                   ~((1L << 9) - 1),
+                                   ~((1L << 9)),
+                                   ~((1L << 10) - 1),
+                                   ~((1L << 10)),
+                                   ~((1L << 11) - 1),
+                                   ~((1L << 11)),
+                                   ~((1L << 12) - 1),
+                                   ~((1L << 12)),
+                                   ~((1L << 13) - 1),
+                                   ~((1L << 13)),
+                                   ~((1L << 14) - 1),
+                                   ~((1L << 14)),
+                                   ~((1L << 15) - 1),
+                                   ~((1L << 15)),
+                                   ~((1L << 16) - 1),
+                                   ~((1L << 16)),
+                                   ~((1L << 17) - 1),
+                                   ~((1L << 17)),
+                                   ~((1L << 18) - 1),
+                                   ~((1L << 18)),
+                                   ~((1L << 19) - 1),
+                                   ~((1L << 19)),
+                                   ~((1L << 20) - 1),
+                                   ~((1L << 20)),
+                                   ~((1L << 21) - 1),
+                                   ~((1L << 21)),
+                                   ~((1L << 22) - 1),
+                                   ~((1L << 22)),
+                                   ~((1L << 23) - 1),
+                                   ~((1L << 23)),
+                                   ~((1L << 24) - 1),
+                                   ~((1L << 24)),
+                                   ~((1L << 25) - 1),
+                                   ~((1L << 25)),
+                                   ~((1L << 26) - 1),
+                                   ~((1L << 26)),
+                                   ~((1L << 27) - 1),
+                                   ~((1L << 27)),
+                                   ~((1L << 28) - 1),
+                                   ~((1L << 28)),
+                                   ~((1L << 29) - 1),
+                                   ~((1L << 29)),
+                                   ~((1L << 30) - 1),
+                                   ~((1L << 30)),
+                                   ~((1L << 31) - 1),
+                                   ~((1L << 31)),
+                                   ~((1L << 32) - 1),
+                                   ~((1L << 32)),
+                                   ~((1L << 33) - 1),
+                                   ~((1L << 33)),
+                                   ~((1L << 34) - 1),
+                                   ~((1L << 34)),
+                                   ~((1L << 35) - 1),
+                                   ~((1L << 35)),
+                                   ~((1L << 36) - 1),
+                                   ~((1L << 36)),
+                                   ~((1L << 37) - 1),
+                                   ~((1L << 37)),
+                                   ~((1L << 38) - 1),
+                                   ~((1L << 38)),
+                                   ~((1L << 39) - 1),
+                                   ~((1L << 39)),
+                                   ~((1L << 40) - 1),
+                                   ~((1L << 40)),
+                                   ~((1L << 41) - 1),
+                                   ~((1L << 41)),
+                                   ~((1L << 42) - 1),
+                                   ~((1L << 42)),
+                                   ~((1L << 43) - 1),
+                                   ~((1L << 43)),
+                                   ~((1L << 44) - 1),
+                                   ~((1L << 44)),
+                                   ~((1L << 45) - 1),
+                                   ~((1L << 45)),
+                                   ~((1L << 46) - 1),
+                                   ~((1L << 46)),
+                                   ~((1L << 47) - 1),
+                                   ~((1L << 47)),
+                                   ~((1L << 48) - 1),
+                                   ~((1L << 48)),
+                                   ~((1L << 49) - 1),
+                                   ~((1L << 49)),
+                                   ~((1L << 50) - 1),
+                                   ~((1L << 50)),
+                                   ~((1L << 51) - 1),
+                                   ~((1L << 51)),
+                                   ~((1L << 52) - 1),
+                                   ~((1L << 52)),
+                                   ~((1L << 53) - 1),
+                                   ~((1L << 53)),
+                                   ~((1L << 54) - 1),
+                                   ~((1L << 54)),
+                                   ~((1L << 55) - 1),
+                                   ~((1L << 55)),
+                                   ~((1L << 56) - 1),
+                                   ~((1L << 56)),
+                                   ~((1L << 57) - 1),
+                                   ~((1L << 57)),
+                                   ~((1L << 58) - 1),
+                                   ~((1L << 58)),
+                                   ~((1L << 59) - 1),
+                                   ~((1L << 59)),
+                                   ~((1L << 60) - 1),
+                                   ~((1L << 60)),
+                                   ~((1L << 61) - 1),
+                                   ~((1L << 61)),
+                                   ~((1L << 62) - 1),
+                                   ~((1L << 62)),
+                                   ~((1L << 63) - 1),
+                                   };
+    Double[] testDoubles = new Double[]{ null,
+                                         Double.NEGATIVE_INFINITY,
+                                         -Double.MAX_VALUE,
+                                         -1e+200,
+                                         -1e3,
+                                         -1e0,
+                                         -1e-3,
+                                         -1e-200,
+                                         -Double.MIN_VALUE,
+                                         -0.0,
+                                         0.0,
+                                         Double.MIN_VALUE,
+                                         1e-200,
+                                         1e-3,
+                                         1e0,
+                                         1e3,
+                                         1e+200,
+                                         Double.MAX_VALUE,
+                                         Double.POSITIVE_INFINITY,
+                                         Double.NaN };
+    Float[] testFloats = new Float[]{ null,
+                                      Float.NEGATIVE_INFINITY,
+                                      -Float.MAX_VALUE,
+                                      -1e+30f,
+                                      -1e3f,
+                                      -1e0f,
+                                      -1e-3f,
+                                      -1e-30f,
+                                      -Float.MIN_VALUE,
+                                      -0.0f,
+                                      0.0f,
+                                      Float.MIN_VALUE,
+                                      1e-30f,
+                                      1e-3f,
+                                      1e0f,
+                                      1e3f,
+                                      1e+30f,
+                                      Float.MAX_VALUE,
+                                      Float.POSITIVE_INFINITY,
+                                      Float.NaN };
+    Boolean[] testBools = new Boolean[]{ null, false, true };
+    UUID[] testUUIDs = new UUID[]{ null,
+                                   TimeUUID.Generator.nextTimeAsUUID(),
+                                   UUID.randomUUID(),
+                                   UUID.randomUUID(),
+                                   UUID.randomUUID(),
+                                   TimeUUID.Generator.atUnixMillis(123, 234).asUUID(),
+                                   TimeUUID.Generator.atUnixMillis(123, 234).asUUID(),
+                                   TimeUUID.Generator.atUnixMillis(123).asUUID(),
+                                   UUID.fromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"),
+                                   UUID.fromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"),
+                                   UUID.fromString("e902893a-9d22-3c7e-a7b8-d6e313b71d9f"),
+                                   UUID.fromString("74738ff5-5367-5958-9aee-98fffdcd1876"),
+                                   UUID.fromString("52df1bb0-6a2f-11e6-b6e4-a6dea7a01b67"),
+                                   UUID.fromString("52df1bb0-6a2f-11e6-362d-aff2143498ea"),
+                                   UUID.fromString("52df1bb0-6a2f-11e6-b62d-aff2143498ea") };
+    // Instant.MIN/MAX fail Date.from.
+    Date[] testDates = new Date[]{ null,
+                                   Date.from(Instant.ofEpochSecond(Integer.MIN_VALUE)),
+                                   Date.from(Instant.ofEpochSecond(Short.MIN_VALUE)),
+                                   Date.from(Instant.ofEpochMilli(-2000)),
+                                   Date.from(Instant.EPOCH),
+                                   Date.from(Instant.ofEpochMilli(2000)),
+                                   Date.from(Instant.ofEpochSecond(Integer.MAX_VALUE)),
+                                   Date.from(Instant.now()) };
+    InetAddress[] testInets;
+    {
+        try
+        {
+            testInets = new InetAddress[]{ null,
+                                           InetAddress.getLocalHost(),
+                                           InetAddress.getLoopbackAddress(),
+                                           InetAddress.getByName("192.168.0.1"),
+                                           InetAddress.getByName("fe80::428d:5cff:fe53:1dc9"),
+                                           InetAddress.getByName("2001:610:3:200a:192:87:36:2"),
+                                           InetAddress.getByName("10.0.0.1"),
+                                           InetAddress.getByName("0a00:0001::"),
+                                           InetAddress.getByName("::10.0.0.1") };
+        }
+        catch (UnknownHostException e)
+        {
+            throw Throwables.propagate(e);
+        }
+    }
+
+    BigInteger[] testBigInts;
+
+    {
+        Set<BigInteger> bigs = new TreeSet<>();
+        for (Long l : testLongs)
+            if (l != null)
+                bigs.add(BigInteger.valueOf(l));
+        for (int i = 0; i < 11; ++i)
+        {
+            bigs.add(BigInteger.valueOf(i));
+            bigs.add(BigInteger.valueOf(-i));
+
+            bigs.add(BigInteger.valueOf((1L << 4 * i) - 1));
+            bigs.add(BigInteger.valueOf((1L << 4 * i)));
+            bigs.add(BigInteger.valueOf(-(1L << 4 * i) - 1));
+            bigs.add(BigInteger.valueOf(-(1L << 4 * i)));
+            String p = exp10(i);
+            bigs.add(new BigInteger(p));
+            bigs.add(new BigInteger("-" + p));
+            p = exp10(1 << i);
+            bigs.add(new BigInteger(p));
+            bigs.add(new BigInteger("-" + p));
+
+            BigInteger base = BigInteger.ONE.shiftLeft(512 * i);
+            bigs.add(base);
+            bigs.add(base.add(BigInteger.ONE));
+            bigs.add(base.subtract(BigInteger.ONE));
+            base = base.negate();
+            bigs.add(base);
+            bigs.add(base.add(BigInteger.ONE));
+            bigs.add(base.subtract(BigInteger.ONE));
+        }
+        testBigInts = bigs.toArray(new BigInteger[0]);
+    }
+
+    static String exp10(int pow)
+    {
+        StringBuilder builder = new StringBuilder();
+        builder.append('1');
+        for (int i = 0; i < pow; ++i)
+            builder.append('0');
+        return builder.toString();
+    }
+
+    BigDecimal[] testBigDecimals;
+    {
+        String vals = "0, 1, 1.1, 21, 98.9, 99, 99.9, 100, 100.1, 101, 331, 0.4, 0.07, 0.0700, 0.005, " +
+                      "6e4, 7e200, 6e-300, 8.1e2000, 8.1e-2000, 9e2000000000, " +
+                      "123456789012.34567890e-1000000000, 123456.78901234, 1234.56789012e2, " +
+                      "1.0000, 0.01e2, 100e-2, 00, 0.000, 0E-18, 0E+18";
+        List<BigDecimal> decs = new ArrayList<>();
+        for (String s : vals.split(", "))
+        {
+            decs.add(new BigDecimal(s));
+            decs.add(new BigDecimal("-" + s));
+        }
+        testBigDecimals = decs.toArray(new BigDecimal[0]);
+    }
+
+    Object[][] testValues = new Object[][]{ testStrings,
+                                            testInts,
+                                            testBools,
+                                            testDoubles,
+                                            testBigInts,
+                                            testBigDecimals };
+
+    AbstractType[] testTypes = new AbstractType[]{ UTF8Type.instance,
+                                                   Int32Type.instance,
+                                                   BooleanType.instance,
+                                                   DoubleType.instance,
+                                                   IntegerType.instance,
+                                                   DecimalType.instance };
+}
diff --git a/test/unit/org/apache/cassandra/utils/bytecomparable/DecoratedKeyByteSourceTest.java b/test/unit/org/apache/cassandra/utils/bytecomparable/DecoratedKeyByteSourceTest.java
new file mode 100644
index 0000000..9a39550
--- /dev/null
+++ b/test/unit/org/apache/cassandra/utils/bytecomparable/DecoratedKeyByteSourceTest.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.utils.bytecomparable;
+
+import java.nio.ByteBuffer;
+import java.util.Random;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.cassandra.db.BufferDecoratedKey;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.dht.ByteOrderedPartitioner;
+
+/**
+ * Round-trip tests for decorated keys through their byte-comparable
+ * representation: random keys are decorated with the byte-ordered
+ * partitioner, encoded via {@code asComparableBytes}, then decoded again and
+ * compared against the original. Parameterized over the
+ * {@link ByteComparable.Version} (currently only OSS42).
+ */
+@RunWith(Parameterized.class)
+public class DecoratedKeyByteSourceTest
+{
+    // Number of random keys tried per test method.
+    private static final int NUM_ITERATIONS = 100;
+    // Length in bytes of each randomly generated key.
+    private static final int RANDOM_BYTES_LENGTH = 100;
+
+    @Parameterized.Parameters(name = "version={0}")
+    public static Iterable<ByteComparable.Version> versions()
+    {
+        return ImmutableList.of(ByteComparable.Version.OSS42);
+    }
+
+    private final ByteComparable.Version version;
+
+    public DecoratedKeyByteSourceTest(ByteComparable.Version version)
+    {
+        this.version = version;
+    }
+
+    /**
+     * Encodes a random BufferDecoratedKey to byte-comparable form and decodes
+     * it back via {@code BufferDecoratedKey.fromByteComparable}; the decoded
+     * key must equal the original.
+     */
+    @Test
+    public void testDecodeBufferDecoratedKey()
+    {
+        for (int i = 0; i < NUM_ITERATIONS; ++i)
+        {
+            BufferDecoratedKey initialBuffer =
+                    (BufferDecoratedKey) ByteOrderedPartitioner.instance.decorateKey(newRandomBytesBuffer());
+            BufferDecoratedKey decodedBuffer = BufferDecoratedKey.fromByteComparable(
+                    initialBuffer, version, ByteOrderedPartitioner.instance);
+            Assert.assertEquals(initialBuffer, decodedBuffer);
+        }
+    }
+
+    /**
+     * Extracts the raw key bytes from a key's byte-comparable source via
+     * {@code DecoratedKey.keyFromByteSource}, asserting that the source is
+     * fully consumed (next() == END_OF_STREAM) and that the extracted bytes
+     * match the original key's backing array.
+     */
+    @Test
+    public void testDecodeKeyBytes()
+    {
+        for (int i = 0; i < NUM_ITERATIONS; ++i)
+        {
+            BufferDecoratedKey initialBuffer =
+                    (BufferDecoratedKey) ByteOrderedPartitioner.instance.decorateKey(newRandomBytesBuffer());
+            ByteSource.Peekable src = ByteSource.peekable(initialBuffer.asComparableBytes(version));
+            byte[] keyBytes = DecoratedKey.keyFromByteSource(src, version, ByteOrderedPartitioner.instance);
+            Assert.assertEquals(ByteSource.END_OF_STREAM, src.next());
+            // getKey() wraps the exact array created below, so array() is safe
+            // here (the buffer is heap-backed with zero offset).
+            Assert.assertArrayEquals(initialBuffer.getKey().array(), keyBytes);
+        }
+    }
+
+    // Returns a buffer wrapping RANDOM_BYTES_LENGTH unseeded-random bytes.
+    // NOTE(review): a new java.util.Random is allocated per call; consider
+    // ThreadLocalRandom.current() or a shared static instance — harmless in a
+    // test, so left as-is.
+    private static ByteBuffer newRandomBytesBuffer()
+    {
+        byte[] randomBytes = new byte[RANDOM_BYTES_LENGTH];
+        new Random().nextBytes(randomBytes);
+        return ByteBuffer.wrap(randomBytes);
+    }
+}