Merge pull request #1204 from apache/issue/OAK-10006

Block repository writes if repo lock is lost
diff --git a/oak-auth-ldap/pom.xml b/oak-auth-ldap/pom.xml
index 1f8997d..dd59307 100644
--- a/oak-auth-ldap/pom.xml
+++ b/oak-auth-ldap/pom.xml
@@ -107,9 +107,10 @@
             <artifactId>commons-lang3</artifactId>
             <scope>provided</scope>
         </dependency>
+
         <dependency>
-            <groupId>org.apache.felix</groupId>
-            <artifactId>org.apache.felix.scr.annotations</artifactId>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
             <scope>provided</scope>
         </dependency>
 
diff --git a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
index 200a989..631bf5d 100644
--- a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
+++ b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
@@ -59,11 +59,12 @@
 import org.apache.directory.ldap.client.api.LookupLdapConnectionValidator;
 import org.apache.directory.ldap.client.api.NoVerificationTrustManager;
 import org.apache.directory.ldap.client.api.ValidatingPoolableLdapConnectionFactory;
-import org.apache.felix.scr.annotations.Activate;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.ConfigurationPolicy;
-import org.apache.felix.scr.annotations.Deactivate;
-import org.apache.felix.scr.annotations.Service;
+
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.ConfigurationPolicy;
+
 import org.apache.jackrabbit.commons.iterator.AbstractLazyIterator;
 import org.apache.jackrabbit.oak.commons.DebugTimer;
 import org.apache.jackrabbit.oak.spi.security.ConfigurationParameters;
@@ -77,6 +78,7 @@
 import org.apache.jackrabbit.util.Text;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
+import org.osgi.service.metatype.annotations.Designate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -87,10 +89,11 @@
  * Please refer to {@link LdapProviderConfig} for configuration options.
  */
 @Component(
-        // note that the metatype information is generated from LdapProviderConfig
-        policy = ConfigurationPolicy.REQUIRE
-)
-@Service
+        service = { ExternalIdentityProvider.class, PrincipalNameResolver.class },
+        configurationPolicy = ConfigurationPolicy.REQUIRE)
+@Designate(
+        ocd = LdapProviderConfig.Configuration.class,
+        factory = true )
 public class LdapIdentityProvider implements ExternalIdentityProvider, PrincipalNameResolver {
 
     /**
diff --git a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapProviderConfig.java b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapProviderConfig.java
index e0b72c8..643ee0e 100644
--- a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapProviderConfig.java
+++ b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapProviderConfig.java
@@ -21,519 +21,329 @@
 import java.util.List;
 
 import org.apache.directory.api.util.Strings;
-import org.apache.felix.scr.annotations.Component;
-import org.apache.felix.scr.annotations.Property;
 import org.apache.jackrabbit.oak.spi.security.ConfigurationParameters;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.AttributeType;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
 
 /**
  * Configuration of the ldap provider.
  */
-@Component(
-        label = "Apache Jackrabbit Oak LDAP Identity Provider",
-        name = "org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider",
-        configurationFactory = true,
-        metatype = true,
-        ds = false
-)
 public class LdapProviderConfig {
 
-    /**
-     * @see #getName()
-     */
+    @ObjectClassDefinition(
+            id = "org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider",
+            name = "Apache Jackrabbit Oak LDAP Identity Provider"
+    )
+    @interface Configuration {
+        @AttributeDefinition(
+                name = "LDAP Provider Name",
+                description = "Name of this LDAP provider configuration. This is used to reference this provider by the login modules."
+        )
+        String provider_name() default PARAM_NAME_DEFAULT;
+
+        @AttributeDefinition(
+                name = "LDAP Server Hostname",
+                description = "Hostname of the LDAP server"
+        )
+        String host_name() default PARAM_LDAP_HOST_DEFAULT;
+
+        @AttributeDefinition(
+                name = "LDAP Server Port",
+                description = "Port of the LDAP server"
+        )
+        int host_port() default PARAM_LDAP_PORT_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Use SSL",
+                description = "Indicates if an SSL (LDAPs) connection should be used."
+        )
+        boolean host_ssl() default PARAM_USE_SSL_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Use TLS",
+                description = "Indicates if TLS should be started on connections."
+        )
+        boolean host_tls() default PARAM_USE_TLS_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Disable certificate checking",
+                description = "Indicates if server certificate validation should be disabled."
+        )
+        boolean host_noCertCheck() default PARAM_NO_CERT_CHECK_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Enabled Protocols",
+                description = "Allows to explicitly set the enabled protocols on the LdapConnectionConfig.",
+                cardinality = Integer.MAX_VALUE
+        )
+        String[] host_enabledProtocols();
+
+        @AttributeDefinition(
+                name = "Bind DN",
+                description = "DN of the user for authentication. Leave empty for anonymous bind."
+        )
+        String bind_dn() default PARAM_BIND_DN_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Bind Password",
+                description = "Password of the user for authentication.",
+                type = AttributeType.PASSWORD
+        )
+        String bind_password() default PARAM_BIND_PASSWORD_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Search Timeout",
+                description = "Time in until a search times out (eg: '1s' or '1m 30s')."
+        )
+        String searchTimeout() default PARAM_SEARCH_TIMEOUT_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Admin pool max active",
+                description = "The max active size of the admin connection pool."
+        )
+        long adminPool_maxActive() default PARAM_ADMIN_POOL_MAX_ACTIVE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Admin pool lookup on validate",
+                description = "Indicates an ROOT DSE lookup is performed to test if the connection is still valid when taking it out of the pool."
+        )
+        boolean adminPool_lookupOnValidate() default PARAM_ADMIN_POOL_LOOKUP_ON_VALIDATE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Admin pool min evictable idle time",
+                description = "The minimum amount of time a connection from the admin pool must be idle before becoming eligible for eviction by the idle object evictor, if running (eg: '1m 30s'). When non-positive, no connections will be evicted from the pool due to idle time alone."
+        )
+        String adminPool_minEvictableIdleTime() default PARAM_ADMIN_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Time interval to sleep between evictor runs for the admin pool",
+                description = "Time interval to sleep between runs of the idle object evictor thread for the admin pool (eg: '1m 30s'). When non-positive, no idle object evictor thread will be run."
+        )
+        String adminPool_timeBetweenEvictionRuns() default PARAM_ADMIN_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Max number of objects to be tested per run of the idle object evictor for the admin pool",
+                description = "The max number of objects to examine during each run of the idle object evictor thread for the admin pool (if any)"
+        )
+        int adminPool_numTestsPerEvictionRun() default PARAM_ADMIN_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User pool max active",
+                description = "The max active size of the user connection pool."
+        )
+        long userPool_maxActive() default PARAM_USER_POOL_MAX_ACTIVE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User pool lookup on validate",
+                description = "Indicates an ROOT DSE lookup is performed to test if the connection is still valid when taking it out of the pool."
+        )
+        boolean userPool_lookupOnValidate() default PARAM_USER_POOL_LOOKUP_ON_VALIDATE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User pool min evictable idle time",
+                description = "The minimum amount of time a connection from the user pool must be idle before becoming eligible for eviction by the idle object evictor, if running (eg: '1m 30s'). When non-positive, no connections will be evicted from the pool due to idle time alone."
+        )
+        String userPool_minEvictableIdleTime() default PARAM_USER_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Time interval to sleep between evictor runs for the user pool",
+                description = "Time interval to sleep between runs of the idle object evictor thread for the user pool (eg: '1m 30s'). When non-positive, no idle object evictor thread will be run."
+        )
+        String userPool_timeBetweenEvictionRuns() default PARAM_USER_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Max number of objects to be tested per run of the idle object evictor for the user pool",
+                description = "The max number of objects to examine during each run of the idle object evictor thread for the user pool (if any)"
+        )
+        int userPool_numTestsPerEvictionRun() default PARAM_USER_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User base DN",
+                description = "The base DN for user searches."
+        )
+        String user_baseDN() default PARAM_USER_BASE_DN_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User object classes",
+                description = "The list of object classes an user entry must contain.",
+                cardinality = Integer.MAX_VALUE
+        )
+        String[] user_objectclass() default {"person"};
+
+        @AttributeDefinition(
+                name = "User id attribute",
+                description = "Name of the attribute that contains the user id."
+        )
+        String user_idAttribute() default PARAM_USER_ID_ATTRIBUTE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User extra filter",
+                description = "Extra LDAP filter to use when searching for users. The final filter is" +
+                        "formatted like: '(&(<idAttr>=<userId>)(objectclass=<objectclass>)<extraFilter>)'"
+        )
+        String user_extraFilter() default PARAM_USER_EXTRA_FILTER_DEFAULT;
+
+        @AttributeDefinition(
+                name = "User DN paths",
+                description = "Controls if the DN should be used for calculating a portion of the intermediate path."
+        )
+        boolean user_makeDnPath() default PARAM_USER_MAKE_DN_PATH_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Group base DN",
+                description = "The base DN for group searches."
+        )
+        String group_baseDN() default PARAM_GROUP_BASE_DN_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Group object classes",
+                description = "The list of object classes a group entry must contain.",
+                cardinality = Integer.MAX_VALUE
+        )
+        String[] group_objectclass() default {"groupOfUniqueNames"};
+
+        @AttributeDefinition(
+                name = "Group name attribute",
+                description = "Name of the attribute that contains the group name."
+        )
+        String group_nameAttribute() default PARAM_GROUP_NAME_ATTRIBUTE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Group extra filter",
+                description = "Extra LDAP filter to use when searching for groups. The final filter is" +
+                        "formatted like: '(&(<nameAttr>=<groupName>)(objectclass=<objectclass>)<extraFilter>)'"
+        )
+        String group_extraFilter() default PARAM_GROUP_EXTRA_FILTER_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Group DN paths",
+                description = "Controls if the DN should be used for calculating a portion of the intermediate path."
+        )
+        boolean group_makeDnPath() default PARAM_GROUP_MAKE_DN_PATH_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Group member attribute",
+                description = "Group attribute that contains the member(s) of a group."
+        )
+        String group_memberAttribute() default PARAM_GROUP_MEMBER_ATTRIBUTE_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Use user id for external ids",
+                description = "If enabled, the value of the user id (resp. group name) attribute will be used to create external identifiers. Leave disabled to use the DN instead."
+        )
+        boolean useUidForExtId() default PARAM_USE_UID_FOR_EXT_ID_DEFAULT;
+
+        @AttributeDefinition(
+                name = "Custom Attributes",
+                description = "Attributes retrieved when looking up LDAP entries. Leave empty to retrieve all attributes.",
+                cardinality = Integer.MAX_VALUE
+        )
+        String[] customattributes();
+    }
+
+    public static final String PARAM_NAME = "provider.name";
     public static final String PARAM_NAME_DEFAULT = "ldap";
 
-    /**
-     * @see #getName()
-     */
-    @Property(
-            label = "LDAP Provider Name",
-            description = "Name of this LDAP provider configuration. This is used to reference this provider by the login modules.",
-            value = PARAM_NAME_DEFAULT
-    )
-    public static final String PARAM_NAME = "provider.name";
-
-    /**
-     * @see #getHostname()
-     */
+    public static final String PARAM_LDAP_HOST = "host.name";
     public static final String PARAM_LDAP_HOST_DEFAULT = "localhost";
 
-    /**
-     * @see #getHostname()
-     */
-    @Property(
-            label = "LDAP Server Hostname",
-            description = "Hostname of the LDAP server",
-            value = PARAM_LDAP_HOST_DEFAULT
-    )
-    public static final String PARAM_LDAP_HOST = "host.name";
-
-    /**
-     * @see #getPort()
-     */
+    public static final String PARAM_LDAP_PORT = "host.port";
     public static final int PARAM_LDAP_PORT_DEFAULT = 389;
 
-    /**
-     * @see #getPort()
-     */
-    @Property(
-            label = "LDAP Server Port",
-            description = "Port of the LDAP server",
-            intValue = PARAM_LDAP_PORT_DEFAULT
-    )
-    public static final String PARAM_LDAP_PORT = "host.port";
-
-    /**
-     * @see #useSSL()
-     */
+    public static final String PARAM_USE_SSL = "host.ssl";
     public static final boolean PARAM_USE_SSL_DEFAULT = false;
 
-    /**
-     * @see #useSSL()
-     */
-    @Property(
-            label = "Use SSL",
-            description = "Indicates if an SSL (LDAPs) connection should be used.",
-            boolValue = PARAM_USE_SSL_DEFAULT
-    )
-    public static final String PARAM_USE_SSL = "host.ssl";
-
-    /**
-     * @see #useTLS()
-     */
+    public static final String PARAM_USE_TLS = "host.tls";
     public static final boolean PARAM_USE_TLS_DEFAULT = false;
 
-    /**
-     * @see #useTLS()
-     */
-    @Property(
-            label = "Use TLS",
-            description = "Indicates if TLS should be started on connections.",
-            boolValue = PARAM_USE_TLS_DEFAULT
-    )
-    public static final String PARAM_USE_TLS = "host.tls";
-
-    /**
-     * @see #noCertCheck()
-     */
+    public static final String PARAM_NO_CERT_CHECK = "host.noCertCheck";
     public static final boolean PARAM_NO_CERT_CHECK_DEFAULT = false;
 
-    /**
-     * @see #noCertCheck()
-     */
-    @Property(
-            label = "Disable certificate checking",
-            description = "Indicates if server certificate validation should be disabled.",
-            boolValue = PARAM_NO_CERT_CHECK_DEFAULT
-    )
-    public static final String PARAM_NO_CERT_CHECK = "host.noCertCheck";
-    
-    /**
-     * @see #enabledProtocols() 
-     */
-    @Property(
-            label = "Enabled Protocols",
-            description = "Allows to explicitly set the enabled protocols on the LdapConnectionConfig.",
-            value = {},
-            cardinality = Integer.MAX_VALUE
-    )
     public static final String PARAM_ENABLED_PROTOCOLS = "host.enabledProtocols";
 
-    /**
-     * @see #getBindDN()
-     */
+
+    public static final String PARAM_BIND_DN = "bind.dn";
     public static final String PARAM_BIND_DN_DEFAULT = "";
 
-    /**
-     * @see #getBindDN()
-     */
-    @Property(
-            label = "Bind DN",
-            description = "DN of the user for authentication. Leave empty for anonymous bind.",
-            value = PARAM_BIND_DN_DEFAULT
-    )
-    public static final String PARAM_BIND_DN = "bind.dn";
 
-    /**
-     * @see #getBindPassword()
-     */
+    public static final String PARAM_BIND_PASSWORD = "bind.password";
     public static final String PARAM_BIND_PASSWORD_DEFAULT = "";
 
-    /**
-     * @see #getBindPassword()
-     */
-    @Property(
-            label = "Bind Password",
-            description = "Password of the user for authentication.",
-            passwordValue = PARAM_BIND_PASSWORD_DEFAULT
-    )
-    public static final String PARAM_BIND_PASSWORD = "bind.password";
 
-    /**
-     * @see #getSearchTimeout()
-     */
+    public static final String PARAM_SEARCH_TIMEOUT = "searchTimeout";
     public static final String PARAM_SEARCH_TIMEOUT_DEFAULT = "60s";
 
-    /**
-     * @see #getSearchTimeout()
-     */
-    @Property(
-            label = "Search Timeout",
-            description = "Time in until a search times out (eg: '1s' or '1m 30s').",
-            value = PARAM_SEARCH_TIMEOUT_DEFAULT
-    )
-    public static final String PARAM_SEARCH_TIMEOUT = "searchTimeout";
-
-    /**
-     * @see PoolConfig#getMaxActive()
-     */
+    public static final String PARAM_ADMIN_POOL_MAX_ACTIVE = "adminPool.maxActive";
     public static final int PARAM_ADMIN_POOL_MAX_ACTIVE_DEFAULT = 8;
 
-    /**
-     * @see PoolConfig#getMaxActive()
-     */
-    @Property(
-            label = "Admin pool max active",
-            description = "The max active size of the admin connection pool.",
-            longValue = PARAM_ADMIN_POOL_MAX_ACTIVE_DEFAULT
-    )
-    public static final String PARAM_ADMIN_POOL_MAX_ACTIVE = "adminPool.maxActive";
-
-    /**
-     * @see PoolConfig#lookupOnValidate()
-     */
+    public static final String PARAM_ADMIN_POOL_LOOKUP_ON_VALIDATE = "adminPool.lookupOnValidate";
     public static final boolean PARAM_ADMIN_POOL_LOOKUP_ON_VALIDATE_DEFAULT = true;
 
-    /**
-     * @see PoolConfig#lookupOnValidate()
-     */
-    @Property(
-            label = "Admin pool lookup on validate",
-            description = "Indicates an ROOT DSE lookup is performed to test if the connection is still valid when taking it out of the pool.",
-            boolValue = PARAM_ADMIN_POOL_LOOKUP_ON_VALIDATE_DEFAULT
-    )
-    public static final String PARAM_ADMIN_POOL_LOOKUP_ON_VALIDATE = "adminPool.lookupOnValidate";
-
-    /**
-     * @see PoolConfig#getMinEvictableIdleTimeMillis()
-     */
+    public static final String PARAM_ADMIN_POOL_MIN_EVICTABLE_IDLE_TIME = "adminPool.minEvictableIdleTime";
     public static final String PARAM_ADMIN_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT = "-1";
 
-    /**
-     * @see PoolConfig#getMinEvictableIdleTimeMillis()
-     */
-    @Property(
-            label = "Admin pool min evictable idle time",
-            description = "The minimum amount of time a connection from the admin pool must be idle before becoming eligible for eviction by the idle object evictor, if running (eg: '1m 30s'). When non-positive, no connections will be evicted from the pool due to idle time alone.",
-            value = PARAM_ADMIN_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT
-    )
-    public static final String PARAM_ADMIN_POOL_MIN_EVICTABLE_IDLE_TIME = "adminPool.minEvictableIdleTime";
-
-    /**
-     * @see PoolConfig#getTimeBetweenEvictionRunsMillis()
-     */
+    public static final String PARAM_ADMIN_POOL_TIME_BETWEEN_EVICTION_RUNS = "adminPool.timeBetweenEvictionRuns";
     public static final String PARAM_ADMIN_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT = "-1";
 
-    /**
-     * @see PoolConfig#getTimeBetweenEvictionRunsMillis()
-     */
-    @Property(
-            label = "Time interval to sleep between evictor runs for the admin pool",
-            description = "Time interval to sleep between runs of the idle object evictor thread for the admin pool (eg: '1m 30s'). When non-positive, no idle object evictor thread will be run.",
-            value = PARAM_ADMIN_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT
-    )
-    public static final String PARAM_ADMIN_POOL_TIME_BETWEEN_EVICTION_RUNS = "adminPool.timeBetweenEvictionRuns";
-
-    /**
-     * @see PoolConfig#getNumTestsPerEvictionRun()
-     */
+    public static final String PARAM_ADMIN_POOL_NUM_TESTS_PER_EVICTION_RUN = "adminPool.numTestsPerEvictionRun";
     public static final int PARAM_ADMIN_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT = 3;
 
-    /**
-     * @see PoolConfig#getNumTestsPerEvictionRun()
-     */
-    @Property(
-            label = "Max number of objects to be tested per run of the idle object evictor for the admin pool",
-            description = "The max number of objects to examine during each run of the idle object evictor thread for the admin pool (if any)",
-            intValue = PARAM_ADMIN_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT
-    )
-    public static final String PARAM_ADMIN_POOL_NUM_TESTS_PER_EVICTION_RUN = "adminPool.numTestsPerEvictionRun";
-
-    /**
-     * @see PoolConfig#getMaxActive()
-     */
+    public static final String PARAM_USER_POOL_MAX_ACTIVE = "userPool.maxActive";
     public static final int PARAM_USER_POOL_MAX_ACTIVE_DEFAULT = 8;
 
-    /**
-     * @see PoolConfig#getMaxActive()
-     */
-    @Property(
-            label = "User pool max active",
-            description = "The max active size of the user connection pool.",
-            longValue = PARAM_USER_POOL_MAX_ACTIVE_DEFAULT
-    )
-    public static final String PARAM_USER_POOL_MAX_ACTIVE = "userPool.maxActive";
-
-    /**
-     * @see PoolConfig#lookupOnValidate()
-     */
+    public static final String PARAM_USER_POOL_LOOKUP_ON_VALIDATE = "userPool.lookupOnValidate";
     public static final boolean PARAM_USER_POOL_LOOKUP_ON_VALIDATE_DEFAULT = true;
 
-    /**
-     * @see PoolConfig#lookupOnValidate()
-     */
-    @Property(
-            label = "User pool lookup on validate",
-            description = "Indicates an ROOT DSE lookup is performed to test if the connection is still valid when taking it out of the pool.",
-            boolValue = PARAM_USER_POOL_LOOKUP_ON_VALIDATE_DEFAULT
-    )
-    public static final String PARAM_USER_POOL_LOOKUP_ON_VALIDATE = "userPool.lookupOnValidate";
-
-    /**
-     * @see PoolConfig#getMinEvictableIdleTimeMillis()
-     */
+    public static final String PARAM_USER_POOL_MIN_EVICTABLE_IDLE_TIME = "userPool.minEvictableIdleTime";
     public static final String PARAM_USER_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT = "-1";
 
-    /**
-     * @see PoolConfig#getMinEvictableIdleTimeMillis()
-     */
-    @Property(
-            label = "User pool min evictable idle time",
-            description = "The minimum amount of time a connection from the user pool must be idle before becoming eligible for eviction by the idle object evictor, if running (eg: '1m 30s'). When non-positive, no connections will be evicted from the pool due to idle time alone.",
-            value = PARAM_USER_POOL_MIN_EVICTABLE_IDLE_TIME_DEFAULT
-    )
-    public static final String PARAM_USER_POOL_MIN_EVICTABLE_IDLE_TIME = "userPool.minEvictableIdleTime";
-
-    /**
-     * @see PoolConfig#getTimeBetweenEvictionRunsMillis()
-     */
+    public static final String PARAM_USER_POOL_TIME_BETWEEN_EVICTION_RUNS = "userPool.timeBetweenEvictionRuns";
     public static final String PARAM_USER_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT = "-1";
 
-    /**
-     * @see PoolConfig#getTimeBetweenEvictionRunsMillis()
-     */
-    @Property(
-            label = "Time interval to sleep between evictor runs for the user pool",
-            description = "Time interval to sleep between runs of the idle object evictor thread for the user pool (eg: '1m 30s'). When non-positive, no idle object evictor thread will be run.",
-            value = PARAM_USER_POOL_TIME_BETWEEN_EVICTION_RUNS_DEFAULT
-    )
-    public static final String PARAM_USER_POOL_TIME_BETWEEN_EVICTION_RUNS = "userPool.timeBetweenEvictionRuns";
-
-    /**
-     * @see PoolConfig#getNumTestsPerEvictionRun()
-     */
+    public static final String PARAM_USER_POOL_NUM_TESTS_PER_EVICTION_RUN = "userPool.numTestsPerEvictionRun";
     public static final int PARAM_USER_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT = 3;
 
-    /**
-     * @see PoolConfig#getNumTestsPerEvictionRun()
-     */
-    @Property(
-            label = "Max number of objects to be tested per run of the idle object evictor for the user pool",
-            description = "The max number of objects to examine during each run of the idle object evictor thread for the user pool (if any)",
-            intValue = PARAM_USER_POOL_NUM_TESTS_PER_EVICTION_RUN_DEFAULT
-    )
-    public static final String PARAM_USER_POOL_NUM_TESTS_PER_EVICTION_RUN = "userPool.numTestsPerEvictionRun";
-
-    /**
-     * @see Identity#getBaseDN()
-     */
+    public static final String PARAM_USER_BASE_DN = "user.baseDN";
     public static final String PARAM_USER_BASE_DN_DEFAULT = "ou=people,o=example,dc=com";
 
-    /**
-     * @see Identity#getBaseDN()
-     */
-    @Property(
-            label = "User base DN",
-            description = "The base DN for user searches.",
-            value = PARAM_USER_BASE_DN_DEFAULT
-    )
-    public static final String PARAM_USER_BASE_DN = "user.baseDN";
-
-    /**
-     * @see Identity#getObjectClasses()
-     */
+    public static final String PARAM_USER_OBJECTCLASS = "user.objectclass";
     public static final String[] PARAM_USER_OBJECTCLASS_DEFAULT = {"person"};
 
-    /**
-     * @see Identity#getObjectClasses()
-     */
-    @Property(
-            label = "User object classes",
-            description = "The list of object classes an user entry must contain.",
-            value = {"person"},
-            cardinality = Integer.MAX_VALUE
-    )
-    public static final String PARAM_USER_OBJECTCLASS = "user.objectclass";
-
-    /**
-     * @see Identity#getIdAttribute()
-     */
+    public static final String PARAM_USER_ID_ATTRIBUTE = "user.idAttribute";
     public static final String PARAM_USER_ID_ATTRIBUTE_DEFAULT = "uid";
 
-    /**
-     * @see Identity#getIdAttribute()
-     */
-    @Property(
-            label = "User id attribute",
-            description = "Name of the attribute that contains the user id.",
-            value = PARAM_USER_ID_ATTRIBUTE_DEFAULT
-    )
-    public static final String PARAM_USER_ID_ATTRIBUTE = "user.idAttribute";
-
-    /**
-     * @see Identity#getExtraFilter()
-     */
+    public static final String PARAM_USER_EXTRA_FILTER = "user.extraFilter";
     public static final String PARAM_USER_EXTRA_FILTER_DEFAULT = "";
 
-    /**
-     * @see Identity#getExtraFilter()
-     */
-    @Property(
-            label = "User extra filter",
-            description = "Extra LDAP filter to use when searching for users. The final filter is" +
-                    "formatted like: '(&(<idAttr>=<userId>)(objectclass=<objectclass>)<extraFilter>)'",
-            value = PARAM_USER_EXTRA_FILTER_DEFAULT
-    )
-    public static final String PARAM_USER_EXTRA_FILTER = "user.extraFilter";
-
-    /**
-     * @see Identity#makeDnPath()
-     */
+    public static final String PARAM_USER_MAKE_DN_PATH = "user.makeDnPath";
     public static final boolean PARAM_USER_MAKE_DN_PATH_DEFAULT = false;
 
-    /**
-     * @see Identity#makeDnPath()
-     */
-    @Property(
-            label = "User DN paths",
-            description = "Controls if the DN should be used for calculating a portion of the intermediate path.",
-            boolValue = PARAM_USER_MAKE_DN_PATH_DEFAULT
-    )
-    public static final String PARAM_USER_MAKE_DN_PATH = "user.makeDnPath";
-
-    /**
-     * @see Identity#getBaseDN()
-     */
+    public static final String PARAM_GROUP_BASE_DN = "group.baseDN";
     public static final String PARAM_GROUP_BASE_DN_DEFAULT = "ou=groups,o=example,dc=com";
 
-    /**
-     * @see Identity#getBaseDN()
-     */
-    @Property(
-            label = "Group base DN",
-            description = "The base DN for group searches.",
-            value = PARAM_GROUP_BASE_DN_DEFAULT
-    )
-    public static final String PARAM_GROUP_BASE_DN = "group.baseDN";
-
-    /**
-     * @see Identity#getObjectClasses()
-     */
+    public static final String PARAM_GROUP_OBJECTCLASS = "group.objectclass";
     public static final String[] PARAM_GROUP_OBJECTCLASS_DEFAULT = {"groupOfUniqueNames"};
 
-    /**
-     * @see Identity#getObjectClasses()
-     */
-    @Property(
-            label = "Group object classes",
-            description = "The list of object classes a group entry must contain.",
-            value = {"groupOfUniqueNames"},
-            cardinality = Integer.MAX_VALUE
-    )
-    public static final String PARAM_GROUP_OBJECTCLASS = "group.objectclass";
-
-    /**
-     * @see Identity#getIdAttribute()
-     */
+    public static final String PARAM_GROUP_NAME_ATTRIBUTE = "group.nameAttribute";
     public static final String PARAM_GROUP_NAME_ATTRIBUTE_DEFAULT = "cn";
 
-    /**
-     * @see Identity#getIdAttribute()
-     */
-    @Property(
-            label = "Group name attribute",
-            description = "Name of the attribute that contains the group name.",
-            value = PARAM_GROUP_NAME_ATTRIBUTE_DEFAULT
-    )
-    public static final String PARAM_GROUP_NAME_ATTRIBUTE = "group.nameAttribute";
-
-    /**
-     * @see Identity#getExtraFilter()
-     */
+    public static final String PARAM_GROUP_EXTRA_FILTER = "group.extraFilter";
     public static final String PARAM_GROUP_EXTRA_FILTER_DEFAULT = "";
 
-    /**
-     * @see Identity#getExtraFilter()
-     */
-    @Property(
-            label = "Group extra filter",
-            description = "Extra LDAP filter to use when searching for groups. The final filter is" +
-                    "formatted like: '(&(<nameAttr>=<groupName>)(objectclass=<objectclass>)<extraFilter>)'",
-            value = PARAM_GROUP_EXTRA_FILTER_DEFAULT
-    )
-    public static final String PARAM_GROUP_EXTRA_FILTER = "group.extraFilter";
-
-    /**
-     * @see Identity#makeDnPath()
-     */
+    public static final String PARAM_GROUP_MAKE_DN_PATH = "group.makeDnPath";
     public static final boolean PARAM_GROUP_MAKE_DN_PATH_DEFAULT = false;
 
-    /**
-     * @see Identity#makeDnPath()
-     */
-    @Property(
-            label = "Group DN paths",
-            description = "Controls if the DN should be used for calculating a portion of the intermediate path.",
-            boolValue = PARAM_GROUP_MAKE_DN_PATH_DEFAULT
-    )
-    public static final String PARAM_GROUP_MAKE_DN_PATH = "group.makeDnPath";
-
-    /**
-     * @see #getGroupMemberAttribute()
-     */
+    public static final String PARAM_GROUP_MEMBER_ATTRIBUTE = "group.memberAttribute";
     public static final String PARAM_GROUP_MEMBER_ATTRIBUTE_DEFAULT = "uniquemember";
 
-    /**
-     * @see #getGroupMemberAttribute()
-     */
-    @Property(
-            label = "Group member attribute",
-            description = "Group attribute that contains the member(s) of a group.",
-            value = PARAM_GROUP_MEMBER_ATTRIBUTE_DEFAULT
-    )
-    public static final String PARAM_GROUP_MEMBER_ATTRIBUTE = "group.memberAttribute";
-
-    /**
-     * @see #getUseUidForExtId()
-     */
+    public static final String PARAM_USE_UID_FOR_EXT_ID = "useUidForExtId";
     public static final boolean PARAM_USE_UID_FOR_EXT_ID_DEFAULT = false;
 
-    /**
-     * @see #getUseUidForExtId()
-     */
-    @Property(
-            label = "Use user id for external ids",
-            description = "If enabled, the value of the user id (resp. group name) attribute will be used to create external identifiers. Leave disabled to use the DN instead.",
-            boolValue = PARAM_USE_UID_FOR_EXT_ID_DEFAULT
-    )
-    public static final String PARAM_USE_UID_FOR_EXT_ID = "useUidForExtId";
-
-    /**
-     * @see Identity#getCustomAttributes()
-     */
-    public static final String[] PARAM_CUSTOM_ATTRIBUTES_DEFAULT = {};
-
-    /**
-     * @see Identity#getCustomAttributes()
-     */
-    @Property(
-            label = "Custom Attributes",
-            description = "Attributes retrieved when looking up LDAP entries. Leave empty to retrieve all attributes.",
-            value = {},
-            cardinality = Integer.MAX_VALUE
-    )
     public static final String PARAM_CUSTOM_ATTRIBUTES = "customattributes";
+    public static final String[] PARAM_CUSTOM_ATTRIBUTES_DEFAULT = {};
 
     /**
      * Defines the configuration of an identity (user or group).
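
The constants above keep the same configuration property names as before (`provider.name`, `host.name`, `host.port`, ...), so configurations written for the old SCR metatype remain valid; only the metatype generation moved from Felix SCR annotations to the standard OSGi annotations. As an informal illustration only — assuming the existing `ConfigurationParameters.of(...)` and `LdapProviderConfig.of(...)` factories, with made-up values — a provider configuration can still be assembled programmatically like this:

```
// Illustrative sketch, not part of this change.
import org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapProviderConfig;
import org.apache.jackrabbit.oak.spi.security.ConfigurationParameters;

public class LdapConfigSketch {
    public static void main(String[] args) {
        // The keys are the same ones the metatype attributes map to (provider.name, host.name, ...).
        ConfigurationParameters params = ConfigurationParameters.of(
                LdapProviderConfig.PARAM_NAME, "example-ldap",
                LdapProviderConfig.PARAM_LDAP_HOST, "ldap.example.com");
        LdapProviderConfig config = LdapProviderConfig.of(params);
        System.out.println(config.getName() + "@" + config.getHostname());
    }
}
```
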
diff --git a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/sort/ExternalSortByteArray.java b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/sort/ExternalSortByteArray.java
index 10c3d77..8dd1c59 100644
--- a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/sort/ExternalSortByteArray.java
+++ b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/sort/ExternalSortByteArray.java
@@ -19,11 +19,11 @@
 import org.apache.jackrabbit.guava.common.base.Preconditions;
 import org.apache.jackrabbit.oak.commons.Compression;
 
-import java.io.BufferedOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -39,14 +39,14 @@
 public class ExternalSortByteArray {
     private final static int DEFAULT_BUFFER_SIZE = 16 * 1024;
 
-    public static <T> void mergeSortedFilesBinary(List<Path> files, BufferedOutputStream fbw, final Comparator<T> cmp,
+    public static <T> void mergeSortedFilesBinary(List<Path> files, OutputStream fbw, final Comparator<T> cmp,
                                                   boolean distinct, Compression algorithm,
                                                   Function<T, byte[]> typeToByteArray, Function<byte[], T> byteArrayToType)
             throws IOException {
         mergeSortedFilesBinary(files, fbw, cmp, distinct, algorithm, typeToByteArray, byteArrayToType, DEFAULT_BUFFER_SIZE);
     }
 
-    public static <T> void mergeSortedFilesBinary(List<Path> files, BufferedOutputStream fbw, final Comparator<T> cmp,
+    public static <T> void mergeSortedFilesBinary(List<Path> files, OutputStream fbw, final Comparator<T> cmp,
                                                   boolean distinct, Compression algorithm,
                                                   Function<T, byte[]> typeToByteArray, Function<byte[], T> byteArrayToType, int readBufferSize)
             throws IOException {
@@ -70,7 +70,7 @@
         }
     }
 
-    private static <T> int mergeBinary(BufferedOutputStream fbw, final Comparator<T> cmp, boolean distinct,
+    private static <T> int mergeBinary(OutputStream fbw, final Comparator<T> cmp, boolean distinct,
                                        List<BinaryFileBuffer<T>> buffers, Function<T, byte[]> typeToByteArray)
             throws IOException {
         PriorityQueue<BinaryFileBuffer<T>> pq = new PriorityQueue<>(
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtils.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtils.java
index 4c1a640..a885ce4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtils.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtils.java
@@ -18,18 +18,30 @@
 
 import org.apache.jackrabbit.guava.common.base.Stopwatch;
 
-import java.time.LocalTime;
-import java.time.format.DateTimeFormatter;
 import java.util.concurrent.TimeUnit;
 
 public class FormattingUtils {
     public static String formatToSeconds(Stopwatch stopwatch) {
-        LocalTime seconds = LocalTime.ofSecondOfDay(stopwatch.elapsed(TimeUnit.SECONDS));
-        return DateTimeFormatter.ISO_TIME.format(seconds);
+        return formatToSeconds(stopwatch.elapsed(TimeUnit.SECONDS));
+    }
+
+    public static String formatToSeconds(long seconds) {
+        long absSeconds = Math.abs(seconds);
+        long hoursPart = TimeUnit.SECONDS.toHours(absSeconds);
+        long minutesPart = TimeUnit.SECONDS.toMinutes(absSeconds) % 60;
+        long secondsPart = TimeUnit.SECONDS.toSeconds(absSeconds) % 60;
+        String sign = seconds < 0 ? "-" : "";
+        return String.format("%s%02d:%02d:%02d", sign, hoursPart, minutesPart, secondsPart);
     }
 
     public static String formatToMillis(Stopwatch stopwatch) {
-        LocalTime nanoSeconds = LocalTime.ofNanoOfDay(stopwatch.elapsed(TimeUnit.MILLISECONDS)*1000000);
-        return DateTimeFormatter.ISO_TIME.format(nanoSeconds);
+        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+        long absMillis = Math.abs(millis);
+        long hoursPart = TimeUnit.MILLISECONDS.toHours(absMillis);
+        long minutesPart = TimeUnit.MILLISECONDS.toMinutes(absMillis) % 60;
+        long secondsPart = TimeUnit.MILLISECONDS.toSeconds(absMillis) % 60;
+        long millisPart = TimeUnit.MILLISECONDS.toMillis(absMillis) % 1000;
+        String sign = millis < 0 ? "-" : "";
+        return String.format("%s%02d:%02d:%02d.%03d", sign, hoursPart, minutesPart, secondsPart, millisPart);
     }
 }
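
Since the new helpers format plain hour/minute/second arithmetic instead of going through LocalTime, they can represent durations of 24 hours or more as well as negative elapsed times, which the previous ISO_TIME based code could not. A small illustrative sketch (not part of the patch; expected output shown in comments):

```
import org.apache.jackrabbit.oak.plugins.index.FormattingUtils;

public class FormattingUtilsSketch {
    public static void main(String[] args) {
        // Uses the new long-based overload added above.
        System.out.println(FormattingUtils.formatToSeconds(3_725L));   // 01:02:05
        System.out.println(FormattingUtils.formatToSeconds(172_800L)); // 48:00:00 (no 24h wrap-around)
        System.out.println(FormattingUtils.formatToSeconds(-90L));     // -00:01:30
    }
}
```
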
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexerMBeanImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexerMBeanImpl.java
index 8751f1d..3f873f9 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexerMBeanImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexerMBeanImpl.java
@@ -36,6 +36,7 @@
 import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
 import org.apache.jackrabbit.oak.spi.whiteboard.Tracker;
 import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.osgi.framework.BundleContext;
 import org.osgi.service.component.annotations.Activate;
 import org.osgi.service.component.annotations.Component;
@@ -74,7 +75,7 @@
 
         try {
             IndexImporter importer =
-                    new IndexImporter(nodeStore, new File(indexDirPath), editorProvider, createLock(ignoreLocalLock));
+                    new IndexImporter(nodeStore, new File(indexDirPath), editorProvider, createLock(ignoreLocalLock), StatisticsProvider.NOOP);
             providerTracker.getServices().forEach(importer::addImporterProvider);
             importer.importIndex();
         } catch (IOException | CommitFailedException | RuntimeException e) {
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/MetricsUtils.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/MetricsUtils.java
new file mode 100644
index 0000000..d1f04aa
--- /dev/null
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/MetricsUtils.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index;
+
+import org.apache.jackrabbit.oak.stats.CounterStats;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
+import org.apache.jackrabbit.oak.stats.StatsOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MetricsUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(MetricsUtils.class);
+
+    /**
+     * <p>Set a counter metric to the given value. The previous value of the metric is discarded and replaced by the
+     * given value.</p>
+     * <p>
+     * Note that this method supports a non-standard use case of counter metrics.
+     * Normally counters are incremented periodically to keep track of how many times an event occurred. This method
+     * is instead intended to be called only once per metric, to record its final value. If it is
+     * called more than once for the same metric, it logs a warning and discards the old value.
+     * </p>
+     *
+     * @param statisticsProvider The statistics provider to use.
+     * @param name               The name of the counter to set.
+     * @param value              The value to set
+     */
+    public static void setCounterOnce(StatisticsProvider statisticsProvider, String name, long value) {
+        CounterStats metric = statisticsProvider.getCounterStats(name, StatsOptions.METRICS_ONLY);
+        LOG.debug("Setting counter {} to {}", name, value);
+        if (metric.getCount() != 0) {
+            LOG.warn("Counter was not 0: {} {}", name, metric.getCount());
+            // Reset to 0
+            metric.dec(metric.getCount());
+        }
+        metric.inc(value);
+    }
+}
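
For context, a minimal usage sketch of the new helper (illustrative only; the metric name below is made up, whereas the real caller added later in this patch derives it from the import phase):

```
import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;

public class MetricsUtilsSketch {
    public static void main(String[] args) {
        // NOOP provider records nothing; a metrics-backed StatisticsProvider would be injected in practice.
        StatisticsProvider statisticsProvider = StatisticsProvider.NOOP;
        // Record a one-shot "final value" counter, e.g. the duration of a completed step.
        MetricsUtils.setCounterOnce(statisticsProvider, "oak_indexer_import_example_duration_seconds", 42L);
    }
}
```
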
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/IndexImporter.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/IndexImporter.java
index d06ab32..d8d1ae4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/IndexImporter.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/IndexImporter.java
@@ -19,15 +19,6 @@
 
 package org.apache.jackrabbit.oak.plugins.index.importer;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.jackrabbit.guava.common.base.Stopwatch;
 import org.apache.jackrabbit.guava.common.collect.ArrayListMultimap;
 import org.apache.jackrabbit.guava.common.collect.ListMultimap;
@@ -41,6 +32,7 @@
 import org.apache.jackrabbit.oak.plugins.index.IndexUpdateCallback;
 import org.apache.jackrabbit.oak.plugins.index.IndexUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
 import org.apache.jackrabbit.oak.plugins.index.importer.AsyncIndexerLock.LockToken;
 import org.apache.jackrabbit.oak.plugins.index.upgrade.IndexDisabler;
 import org.apache.jackrabbit.oak.spi.commit.EditorDiff;
@@ -49,9 +41,19 @@
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkArgument;
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkNotNull;
 import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.REINDEX_COUNT;
@@ -85,9 +87,16 @@
     static final int RETRIES = Integer.getInteger("oak.index.import.retries", 5);
     public static final String INDEX_IMPORT_STATE_KEY = "indexImportState";
     private final Set<String> indexPathsToUpdate;
+    private final StatisticsProvider statisticsProvider;
 
     public IndexImporter(NodeStore nodeStore, File indexDir, IndexEditorProvider indexEditorProvider,
                          AsyncIndexerLock indexerLock) throws IOException {
+        this(nodeStore, indexDir, indexEditorProvider, indexerLock, StatisticsProvider.NOOP);
+    }
+
+    public IndexImporter(NodeStore nodeStore, File indexDir, IndexEditorProvider indexEditorProvider,
+                         AsyncIndexerLock indexerLock, StatisticsProvider statisticsProvider) throws IOException {
+        this.statisticsProvider = statisticsProvider;
         checkArgument(indexDir.exists() && indexDir.isDirectory(), "Path [%s] does not point " +
                 "to existing directory", indexDir.getAbsolutePath());
         this.nodeStore = nodeStore;
@@ -465,19 +474,26 @@
     }
 
     void runWithRetry(int maxRetries, IndexImportState indexImportState, IndexImporterStepExecutor step) throws CommitFailedException, IOException {
+        String indexImportPhaseName = indexImportState == null ? "null" : indexImportState.toString();
         int count = 1;
         Stopwatch start = Stopwatch.createStarted();
         while (count <= maxRetries) {
-            LOG.info("IndexImporterStepExecutor:{}, count:{}", indexImportState, count);
-            LOG.info("[TASK:{}:START]", indexImportState);
+            LOG.info("IndexImporterStepExecutor:{}, count:{}", indexImportPhaseName, count);
+            LOG.info("[TASK:{}:START]", indexImportPhaseName);
             try {
                 step.execute();
-                LOG.info("[TASK:{}:END] Metrics: {}", indexImportState,
+                long durationSeconds = start.elapsed(TimeUnit.SECONDS);
+                LOG.info("[TASK:{}:END] Metrics: {}", indexImportPhaseName,
                         MetricsFormatter.newBuilder()
-                                .add("duration", FormattingUtils.formatToSeconds(start))
-                                .add("durationSeconds", start.elapsed(TimeUnit.SECONDS))
+                                .add("duration", FormattingUtils.formatToSeconds(durationSeconds))
+                                .add("durationSeconds", durationSeconds)
                                 .build()
                 );
+
+                MetricsUtils.setCounterOnce(statisticsProvider,
+                        "oak_indexer_import_" + indexImportPhaseName.toLowerCase() + "_duration_seconds",
+                        durationSeconds);
+
                 break;
             } catch (CommitFailedException | IOException e) {
                 LOG.warn("IndexImporterStepExecutor:{} fail count: {}, retries left: {}", indexImportState, count, maxRetries - count, e);
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/package-info.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/package-info.java
index 6590336..a0d4e8b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/package-info.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/importer/package-info.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@Version("0.3.0")
+@Version("0.4.0")
 package org.apache.jackrabbit.oak.plugins.index.importer;
 
 import org.osgi.annotation.versioning.Version;
\ No newline at end of file
diff --git a/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtilsTest.java b/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtilsTest.java
new file mode 100644
index 0000000..eb494db
--- /dev/null
+++ b/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/FormattingUtilsTest.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index;
+
+import org.apache.jackrabbit.guava.common.base.Stopwatch;
+import org.apache.jackrabbit.guava.common.base.Ticker;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class FormattingUtilsTest {
+
+    private static class TestTicker extends Ticker {
+        private long time = 0;
+        @Override
+        public long read() {
+            return time;
+        }
+        public void set(long nanos) {
+            time = nanos;
+        }
+    }
+    private final TestTicker ticker = new TestTicker();
+    private final Stopwatch sw = Stopwatch.createStarted(ticker);
+
+    @Test
+    public void formatToSeconds() {
+        testFormatToSeconds("00:00:00", 0);
+        testFormatToSeconds("00:00:59", TimeUnit.MILLISECONDS.toNanos(59_567));
+        testFormatToSeconds("00:01:00", TimeUnit.MILLISECONDS.toNanos(60_567));
+        testFormatToSeconds("00:59:00", TimeUnit.MINUTES.toNanos(59));
+        testFormatToSeconds("01:00:00", TimeUnit.MINUTES.toNanos(60));
+        testFormatToSeconds("23:00:00", TimeUnit.HOURS.toNanos(23));
+        testFormatToSeconds("24:00:00", TimeUnit.HOURS.toNanos(24));
+        testFormatToSeconds("48:00:00", TimeUnit.HOURS.toNanos(48));
+        testFormatToSeconds("23:59:59", TimeUnit.HOURS.toNanos(23) +
+                TimeUnit.MINUTES.toNanos(59) +
+                TimeUnit.SECONDS.toNanos(59) +
+                TimeUnit.MILLISECONDS.toNanos(999)
+        );
+        testFormatToSeconds("-00:01:00", -TimeUnit.SECONDS.toNanos(60));
+    }
+
+    private void testFormatToSeconds(String expected, long nanos) {
+        ticker.set(nanos);
+        assertEquals(expected, FormattingUtils.formatToSeconds(sw));
+    }
+
+    @Test
+    public void formatToMillis() {
+        testFormatToMillis("00:00:00.000", 0);
+        testFormatToMillis("00:00:59.567", TimeUnit.MILLISECONDS.toNanos(59_567));
+        testFormatToMillis("00:01:00.567", TimeUnit.MILLISECONDS.toNanos(60_567));
+        testFormatToMillis("00:59:00.000", TimeUnit.MINUTES.toNanos(59));
+        testFormatToMillis("01:00:00.000", TimeUnit.MINUTES.toNanos(60));
+        testFormatToMillis("23:00:00.000", TimeUnit.HOURS.toNanos(23));
+        testFormatToMillis("24:00:00.000", TimeUnit.HOURS.toNanos(24));
+        testFormatToMillis("48:00:00.000", TimeUnit.HOURS.toNanos(48));
+        testFormatToMillis("23:59:59.999", TimeUnit.HOURS.toNanos(23) +
+                TimeUnit.MINUTES.toNanos(59) +
+                TimeUnit.SECONDS.toNanos(59) +
+                TimeUnit.MILLISECONDS.toNanos(999)
+        );
+        testFormatToMillis("-00:01:00.000", -TimeUnit.SECONDS.toNanos(60));
+    }
+
+    private void testFormatToMillis(String expected, long nanos) {
+        ticker.set(nanos);
+        assertEquals(expected, FormattingUtils.formatToMillis(sw));
+    }
+}
\ No newline at end of file
diff --git a/oak-doc/src/site/markdown/nodestore/segment/overview.md b/oak-doc/src/site/markdown/nodestore/segment/overview.md
index 499bcb3..cd2133c 100644
--- a/oak-doc/src/site/markdown/nodestore/segment/overview.md
+++ b/oak-doc/src/site/markdown/nodestore/segment/overview.md
@@ -93,12 +93,12 @@
 If the output of this phase reports that the amount of garbage is beyond a certain threshold, the system creates a new generation and goes on with the next phase.
 
 Compaction executes after a new generation is created.
-The purpose of compaction is to create a compact representation of the current generation. For this the current generation is copied to the new generation leaving out anything from the current generation that is not reachable anymore. Starting with Oak 1.8 compaction can operate in either of two modes: full compaction and tail compaction. Full compaction copies all revisions pertaining to the current generation to the new generation. In contrast tail compaction only copies the most recent ones. The two compaction modes differ in usage of system resources and how much time they consume. While full compaction is more thorough overall, it usually requires much more time, disk spice and disk IO than tail compaction.
+The purpose of compaction is to create a compact representation of the current generation. For this, the current generation is copied to the new generation, leaving out anything from the current generation that is not reachable anymore. Starting with Oak 1.8, compaction can operate in either of two modes: full compaction and tail compaction. Full compaction copies all revisions pertaining to the current generation to the new generation. In contrast, tail compaction only copies the most recent ones. The two compaction modes differ in their usage of system resources and how much time they consume. While full compaction is more thorough overall, it usually requires much more time, disk space and disk IO than tail compaction.
 
 Cleanup is the last phase of garbage collection and kicks in as soon as compaction is done.
 Once relevant data is safe in the new generation, old and unused data from a previous generation can be removed.
 This phase locates outdated pieces of data from one of the oldest generations and removes it from the system.
-This is the only phase where data is actually deleted and disk space is finally freed. The amount of freed disk space depends on the preceding compaction operation. In general cleanup can free less space after a tail compaction than after a full compaction. However, this only becomes effective a further garbage collection cycle due to the system always retaining a total of two generations. 
+This is the only phase where data is actually deleted and disk space is finally freed. The amount of freed disk space depends on the preceding compaction operation. In general cleanup can free less space after a tail compaction than after a full compaction. However, this usually only becomes effective after a further garbage collection cycle as the system retains a total of two generations by default. 
 
 ### <a name="offline-garbage-collection"/> Offline Garbage Collection
 
@@ -133,7 +133,7 @@
 
 Please note that the following messages are to be used as an example only.
 To make the examples clear, some information like the date and time, the name of the thread, and the name of the logger is removed.
-These information depend on the configuration of your logging framework.
+This information depends on the configuration of your logging framework.
 Moreover, some of those messages contain data that can and will change from one execution to the other.
 
 Every log message generated during the garbage collection process includes a sequence number 
@@ -291,17 +291,17 @@
 ##### <a name="how-does-compaction-make-use-of-multithreading"/> How does compaction make use of multithreading?
 
 The parallel compactor adds an initial exploration phase to the compaction process, which scans and splits the content tree
-into multiple parts to be processed simultaneously. For this to be efficient, the tree is only expanded until a certain 
-number of nodes is reached, which is defined relative to the number of threads (main thread + compaction workers).
-
+into multiple parts to be processed simultaneously. For this to be efficient, the tree is only expanded until a pre-defined
+number of nodes (currently 10,000) is reached.
 ```
 TarMK GC #2: compacting with 8 threads.
 TarMK GC #2: exploring content tree to find subtrees for parallel compaction.
-TarMK GC #2: target node count for expansion is 7000, based on 7 available workers.
-TarMK GC #2: Found 3 nodes at depth 1, target is 7000.
-TarMK GC #2: Found 48 nodes at depth 2, target is 7000.
-TarMK GC #2: Found 663 nodes at depth 3, target is 7000.
-TarMK GC #2: Found 66944 nodes at depth 4, target is 7000.
+TarMK GC #2: target node count for expansion is 10000.
+TarMK GC #2: found 1 nodes at depth 0.
+TarMK GC #2: found 3 nodes at depth 1.
+TarMK GC #2: found 48 nodes at depth 2.
+TarMK GC #2: found 663 nodes at depth 3.
+TarMK GC #2: found 66944 nodes at depth 4.
 ```
 
 ##### <a name="how-does-compaction-works-with-concurrent-writes"/> How does compaction work with concurrent writes?
diff --git a/oak-it-osgi/pom.xml b/oak-it-osgi/pom.xml
index 97f985b..233e357 100644
--- a/oak-it-osgi/pom.xml
+++ b/oak-it-osgi/pom.xml
@@ -178,6 +178,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
+      <artifactId>oak-search-elastic</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-store-composite</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
diff --git a/oak-it-osgi/test-bundles.xml b/oak-it-osgi/test-bundles.xml
index 6b8906f..fed3470 100644
--- a/oak-it-osgi/test-bundles.xml
+++ b/oak-it-osgi/test-bundles.xml
@@ -55,6 +55,7 @@
         <include>org.apache.jackrabbit:oak-segment-azure</include>
         <include>org.apache.jackrabbit:oak-jcr</include>
         <include>org.apache.jackrabbit:oak-lucene</include>
+        <include>org.apache.jackrabbit:oak-search-elastic</include>
         <include>org.apache.tika:tika-core</include>
         <include>org.apache.jackrabbit:oak-blob</include>
         <include>org.apache.jackrabbit:oak-blob-cloud-azure</include>
diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/NamePathTest.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/NamePathTest.java
index b70f169..556a41f 100644
--- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/NamePathTest.java
+++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/NamePathTest.java
@@ -17,11 +17,16 @@
 package org.apache.jackrabbit.oak.jcr;
 
 import static org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest.dispose;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.util.List;
+import java.util.Random;
 
+import javax.jcr.NamespaceException;
 import javax.jcr.Node;
 import javax.jcr.Repository;
 import javax.jcr.RepositoryException;
@@ -30,8 +35,10 @@
 import org.apache.jackrabbit.guava.common.collect.ImmutableList;
 
 import org.apache.jackrabbit.oak.spi.security.OpenSecurityProvider;
+import org.apache.jackrabbit.spi.commons.conversion.DefaultNamePathResolver;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class NamePathTest {
@@ -212,6 +219,58 @@
         session.getNode(n.getPath());
     }
 
+    @Test
+    @Ignore("OAK-10544")
+    public void testPrefixRemapping() throws NamespaceException, RepositoryException {
+        Random r = new Random();
+        int i1 = r.nextInt();
+        int i2 = r.nextInt();
+        String prefix = "nstest" + i1 + "XXX";
+        String uri1 = "foobar:1-" + i1;
+        String uri2 = "foobar:2-" + i2;
+        String testLocalName = "test";
+        String expandedTestName ="{" + uri1  + "}" + testLocalName;
+
+        DefaultNamePathResolver resolver = new DefaultNamePathResolver(session);
+
+        try {
+            session.getWorkspace().getNamespaceRegistry().registerNamespace(prefix, uri1);
+
+            String originalName = prefix + ":" + testLocalName;
+            Node testNode = session.getRootNode().addNode(originalName);
+            session.save();
+
+            // verify that name resolver finds correct namespaceURI
+            assertEquals(uri1, resolver.getQName(testNode.getName()).getNamespaceURI());
+
+            // check that expanded name works
+            Node n2 = session.getRootNode().getNode(expandedTestName);
+            assertTrue(testNode.isSame(n2));
+
+            // remap prefix1 to uri2
+            session.setNamespacePrefix(prefix, uri2);
+
+            // check that expanded name still works
+            Node n3 = session.getRootNode().getNode(expandedTestName);
+            assertTrue(testNode.isSame(n3));
+
+            String remappedName = n3.getName();
+            assertNotEquals(originalName, remappedName);
+
+            int colon = remappedName.indexOf(':');
+            assertTrue("remapped name must contain colon:" + remappedName, colon > 0);
+            String remappedPrefix = remappedName.substring(0, colon);
+            assertNotEquals("prefix after mapping must be different", prefix, remappedPrefix);
+
+            // OAK-10544: adding the line below makes the test pass
+            // session.getNamespacePrefix(uri1);
+
+            assertEquals("remapped prefix need to map to original URI " + uri1, uri1, session.getNamespaceURI(remappedPrefix));
+        } finally {
+            session.getWorkspace().getNamespaceRegistry().unregisterNamespace(prefix);
+        }
+    }
+
 
     private void testPaths(List<String> paths) throws RepositoryException {
         for (String path : paths) {
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
index 036b112..69421c0 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
@@ -209,6 +209,9 @@
     public final static String CACHE_FACET_RESULTS_NAME = "oak.lucene.cacheFacetResults";
     private final boolean CACHE_FACET_RESULTS =
             Boolean.parseBoolean(System.getProperty(CACHE_FACET_RESULTS_NAME, "true"));
+    public final static String EAGER_FACET_CACHE_FILL_NAME = "oak.lucene.cacheFacetEagerFill";
+    private final static boolean EAGER_FACET_CACHE_FILL =
+            Boolean.parseBoolean(System.getProperty(EAGER_FACET_CACHE_FILL_NAME, "true"));
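+    // Note: the flag is read into a static final field, so oak.lucene.cacheFacetEagerFill must be
+    // set (for example on the JVM command line) before this class is loaded in order to disable
+    // the eager facet cache fill.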
 
     private static boolean FLAG_CACHE_FACET_RESULTS_CHANGE = true;
 
@@ -1644,11 +1647,39 @@
                 return cachedResults.get(cacheKey);
             }
             LOG.trace("columnName = {} facet Data not present in cache...", columnName);
+            if (EAGER_FACET_CACHE_FILL) {
+                fillFacetCache(numberOfFacets);
+                if (cachedResults.containsKey(cacheKey)) {
+                    LOG.trace("columnName = {} now found", cacheKey);
+                    return cachedResults.get(cacheKey);
+                }
+                LOG.warn("Facet data for {} not found: read using query", cacheKey);
+            }
             List<Facet> result = getFacetsUncached(numberOfFacets, columnName);
             cachedResults.put(cacheKey, result);
             return result;
         }
 
+        private List<Facet> fillFacetCache(int numberOfFacets) throws IOException {
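+            // Computes the facets once for every facet column in the current plan and caches each
+            // result, so subsequent getFacets() calls for the other columns of this query are
+            // served from the cache instead of issuing a separate Lucene query per column.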
+            List<Facet> result = null;
+            LuceneIndexNode indexNode = index.acquireIndexNode(plan);
+            try {
+                IndexSearcher searcher = indexNode.getSearcher();
+                Facets facets = FacetHelper.getFacets(searcher, query, plan, config);
+                if (facets != null) {
+                    List<String> allColumnNames = FacetHelper.getFacetColumnNamesFromPlan(plan);
+                    for (String column : allColumnNames) {
+                        result = getFacetsUncached(facets, numberOfFacets, column);
+                        String cc = column + "/" + numberOfFacets;
+                        cachedResults.put(cc, result);
+                    }
+                }
+            } finally {
+                indexNode.release();
+            }
+            return result;
+        }
+
         private List<Facet> getFacetsUncached(int numberOfFacets, String columnName) throws IOException {
             LuceneIndexNode indexNode = index.acquireIndexNode(plan);
             try {
@@ -1677,6 +1708,28 @@
                 indexNode.release();
             }
         }
+
+        private List<Facet> getFacetsUncached(Facets facets, int numberOfFacets, String columnName) throws IOException {
+            String facetFieldName = FulltextIndex.parseFacetField(columnName);
+            try {
+                ImmutableList.Builder<Facet> res = new ImmutableList.Builder<>();
+                FacetResult topChildren = facets.getTopChildren(numberOfFacets, facetFieldName);
+                if (topChildren == null) {
+                    return null;
+                }
+                for (LabelAndValue lav : topChildren.labelValues) {
+                    res.add(new Facet(
+                            lav.label, lav.value.intValue()
+                    ));
+                }
+                return res.build();
+            } catch (IllegalArgumentException iae) {
+                LOG.debug(iae.getMessage(), iae);
+                LOG.warn("facets for {} not yet indexed: {}", facetFieldName, iae);
+                return null;
+            }
+        }
+
     }
 
     static class LuceneFacetProvider implements FacetProvider {
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelper.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelper.java
index 13dabce..37eb574 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelper.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelper.java
@@ -19,14 +19,17 @@
 package org.apache.jackrabbit.oak.plugins.index.lucene.util;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 
 import org.apache.jackrabbit.oak.plugins.index.search.FieldNames;
 import org.apache.jackrabbit.oak.plugins.index.search.IndexDefinition.SecureFacetConfiguration;
-import org.apache.jackrabbit.oak.spi.query.QueryConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.spi.query.FulltextIndex;
 import org.apache.jackrabbit.oak.spi.query.QueryIndex;
+import org.apache.jackrabbit.oak.spi.query.QueryIndex.IndexPlan;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.Facets;
@@ -60,6 +63,23 @@
         return new NodeStateFacetsConfig(definition);
     }
 
+    /**
+     * Get the column names of all the facets from the index plan, if any.
+     *
+     * @param plan the plan
+     * @return a list (possibly empty)
+     */
+    public static List<String> getFacetColumnNamesFromPlan(IndexPlan plan) {
+        @SuppressWarnings("unchecked")
+        List<String> facetFields = (List<String>) plan.getAttribute(ATTR_FACET_FIELDS);
+        if (facetFields == null) {
+            return Collections.emptyList();
+        }
+        return facetFields.stream().map(
+                FulltextIndex::convertFacetFieldNameToColumnName).
+                collect(Collectors.toList());
+    }
+
     public static Facets getFacets(IndexSearcher searcher, Query query, QueryIndex.IndexPlan plan,
                                    SecureFacetConfiguration secureFacetConfiguration) throws IOException {
         Facets facets = null;
@@ -104,10 +124,6 @@
         return facets;
     }
 
-    public static String parseFacetField(String columnName) {
-        return columnName.substring(QueryConstants.REP_FACET.length() + 1, columnName.length() - 1);
-    }
-
     private static final Facets NULL_FACETS = new Facets() {
         @Override
         public FacetResult getTopChildren(int topN, String dim, String... path) {
diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/hybrid/ManyFacetsTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/hybrid/ManyFacetsTest.java
new file mode 100644
index 0000000..be0b0b2
--- /dev/null
+++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/hybrid/ManyFacetsTest.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.lucene.hybrid;
+
+import static org.apache.jackrabbit.guava.common.util.concurrent.MoreExecutors.newDirectExecutorService;
+import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.PROP_FACETS;
+import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.STATISTICAL_FACET_SAMPLE_SIZE_DEFAULT;
+import static org.apache.jackrabbit.oak.spi.mount.Mounts.defaultMountInfoProvider;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+
+import javax.jcr.GuestCredentials;
+import javax.jcr.Repository;
+import javax.jcr.RepositoryException;
+import javax.jcr.Session;
+import javax.jcr.query.Query;
+import javax.jcr.query.QueryManager;
+import javax.jcr.query.QueryResult;
+import javax.jcr.query.Row;
+import javax.jcr.query.RowIterator;
+
+import org.apache.jackrabbit.oak.InitialContent;
+import org.apache.jackrabbit.oak.Oak;
+import org.apache.jackrabbit.oak.api.ContentRepository;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.commons.PathUtils;
+import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser;
+import org.apache.jackrabbit.oak.commons.json.JsonObject;
+import org.apache.jackrabbit.oak.jcr.Jcr;
+import org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate;
+import org.apache.jackrabbit.oak.plugins.index.counter.NodeCounterEditorProvider;
+import org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier;
+import org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker;
+import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditorProvider;
+import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexProvider;
+import org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex;
+import org.apache.jackrabbit.oak.plugins.index.lucene.TestUtil;
+import org.apache.jackrabbit.oak.plugins.index.lucene.TestUtil.OptionalEditorProvider;
+import org.apache.jackrabbit.oak.plugins.index.lucene.reader.DefaultIndexReaderFactory;
+import org.apache.jackrabbit.oak.plugins.index.lucene.reader.LuceneIndexReaderFactory;
+import org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneIndexDefinitionBuilder;
+import org.apache.jackrabbit.oak.plugins.index.nodetype.NodeTypeIndexProvider;
+import org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexEditorProvider;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.IndexDefinitionBuilder.PropertyRule;
+import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
+import org.apache.jackrabbit.oak.query.AbstractQueryTest;
+import org.apache.jackrabbit.oak.spi.commit.Observer;
+import org.apache.jackrabbit.oak.spi.mount.MountInfoProvider;
+import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider;
+import org.apache.jackrabbit.oak.spi.security.OpenSecurityProvider;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
+import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils;
+import org.apache.jackrabbit.oak.stats.Clock;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
+import org.jetbrains.annotations.Nullable;
+import org.junit.After;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class ManyFacetsTest extends AbstractQueryTest {
+
+    @Rule
+    public TemporaryFolder temporaryFolder = new TemporaryFolder(new File("target"));
+
+    private static final int NUM_LABELS = 4;
+    private static final int NUM_LEAF_NODES = STATISTICAL_FACET_SAMPLE_SIZE_DEFAULT;
+    private static final String FACET_PROP = "facets";
+    private static final long REFRESH_DELTA = TimeUnit.SECONDS.toMillis(1);
+    
+    private static final int FACET_COUNT = 10;
+
+    private ExecutorService executorService = Executors.newFixedThreadPool(2);
+    private OptionalEditorProvider optionalEditorProvider = new OptionalEditorProvider();
+    private NRTIndexFactory nrtIndexFactory;
+    private LuceneIndexProvider luceneIndexProvider;
+    private NodeStore nodeStore;
+    private DocumentQueue queue;
+    private Clock clock = new Clock.Virtual();
+    private Whiteboard wb;
+    private QueryManager qm;
+    private Repository jcrRepo;
+    private Jcr jcr;
+    private Oak oak;
+    // backup original system properties i.e. before test started
+    private final Properties backupProperties = (Properties) System.getProperties().clone();
+
+    @After
+    public void tearDown() throws IOException {
+        luceneIndexProvider.close();
+        new ExecutorCloser(executorService).close();
+        nrtIndexFactory.close();
+        // restore original system properties i.e. before test started
+        System.setProperties(backupProperties);
+    }
+
+    @Override
+    protected ContentRepository createRepository() {
+        IndexCopier copier;
+        try {
+            copier = new IndexCopier(executorService, temporaryFolder.getRoot());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        MountInfoProvider mip = defaultMountInfoProvider();
+        nrtIndexFactory = new NRTIndexFactory(copier, clock, TimeUnit.MILLISECONDS.toSeconds(REFRESH_DELTA), StatisticsProvider.NOOP);
+        nrtIndexFactory.setAssertAllResourcesClosed(true);
+        LuceneIndexReaderFactory indexReaderFactory = new DefaultIndexReaderFactory(mip, copier);
+        IndexTracker tracker = new IndexTracker(indexReaderFactory, nrtIndexFactory);
+        luceneIndexProvider = new LuceneIndexProvider(tracker);
+        queue = new DocumentQueue(100, tracker, newDirectExecutorService());
+        LuceneIndexEditorProvider editorProvider = new LuceneIndexEditorProvider(copier,
+                tracker,
+                null,
+                null,
+                mip);
+        editorProvider.setIndexingQueue(queue);
+        LocalIndexObserver localIndexObserver = new LocalIndexObserver(queue, StatisticsProvider.NOOP);
+        nodeStore = new MemoryNodeStore();
+        oak = new Oak(nodeStore)
+                .with(new InitialContent())
+                .with(new OpenSecurityProvider())
+                .with((QueryIndexProvider) luceneIndexProvider)
+                .with((Observer) luceneIndexProvider)
+                .with(localIndexObserver)
+                .with(editorProvider)
+                .with(new PropertyIndexEditorProvider())
+                .with(new NodeTypeIndexProvider())
+                .with(optionalEditorProvider)
+                .with(new NodeCounterEditorProvider())
+                //Effectively disable async indexing auto run
+                //such that we can control run timing as per test requirement
+                .withAsyncIndexing("async", TimeUnit.DAYS.toSeconds(1));
+
+        wb = oak.getWhiteboard();
+        ContentRepository repo = oak.createContentRepository();
+        return repo;
+    }
+    
+    private void createSmallDataset(int k) throws RepositoryException {
+        Random random = new Random(42);
+        Tree par = createPath("/parent" + k);
+        par.setProperty("foo", "bar");
+        for (int i = 0; i < NUM_LABELS * 2; i++) {
+            Tree subPar = par.addChild("par" + i);
+            for (int j = 0; j < NUM_LEAF_NODES / (2 * NUM_LABELS); j++) {
+                Tree child = subPar.addChild("c" + j);
+                child.setProperty("cons", "val");
+                for (int f = 0; f < FACET_COUNT; f++) {
+                    int labelNum = random.nextInt(NUM_LABELS);
+                    child.setProperty("foo" + f, "foo"  + f + "x" + labelNum);
+                }
+            }
+        }
+    }
+    
+    private Tree createPath(String path) {
+        Tree base = root.getTree("/");
+        for (String e : PathUtils.elements(path)) {
+            base = base.addChild(e);
+        }
+        return base;
+    }
+    
+    private void runAsyncIndex() {
+        AsyncIndexUpdate async = (AsyncIndexUpdate) WhiteboardUtils.getService(wb, Runnable.class, new Predicate<Runnable>() {
+            @Override
+            public boolean test(@Nullable Runnable input) {
+                return input instanceof AsyncIndexUpdate;
+            }
+        });
+        assertNotNull(async);
+        async.run();
+        if (async.isFailing()) {
+            fail("AsyncIndexUpdate failed");
+        }
+        root.refresh();
+    }
+    
+    @Test
+    public void facet() throws Exception {
+        // Explicitly set the following configs to run DelayedLuceneFacetProvider and a thread sleep of 50 ms in refresh readers; see OAK-8898.
+        System.setProperty(LucenePropertyIndex.OLD_FACET_PROVIDER_CONFIG_NAME, "false");
+        // The variable is static final, so once set it remains the same for all tests, which would slow down
+        // other tests because it adds a sleep of the specified milliseconds in the refresh reader method in LuceneIndexNodeManager.
+        // System.setProperty(LuceneIndexNodeManager.OLD_FACET_PROVIDER_TEST_FAILURE_SLEEP_INSTRUMENT_NAME, "40");
+        Thread.currentThread().setName("main");
+        String idxName = "hybridtest";
+        Tree idx = createIndex(root.getTree("/"), idxName);
+        TestUtil.enableIndexingMode(idx, FulltextIndexConstants.IndexingMode.NRT);
+        setTraversalEnabled(false);
+        root.commit();
+        jcr = new Jcr(oak);
+        jcrRepo = jcr.createRepository();
+        createSmallDataset(0);
+        clock.waitUntil(clock.getTime() + REFRESH_DELTA + 1);
+        root.commit();
+        runAsyncIndex();
+        createSmallDataset(2);
+        clock.waitUntil(clock.getTime() + REFRESH_DELTA + 1);
+        root.commit();
+        Session anonSession = jcrRepo.login(new GuestCredentials());
+        qm = anonSession.getWorkspace().getQueryManager();
+        String facetList = "";
+        for (int i = 0; i < FACET_COUNT; i++) {
+            if (i > 0) {
+                facetList += ", ";
+            }
+            facetList += "[rep:facet(foo" + i + ")]";
+        }
+        String queryString = "SELECT " + facetList + 
+                " FROM [nt:base] WHERE [cons] = 'val'";
+        Query q = qm.createQuery(queryString, SQL2);
+        QueryResult qr = q.execute();
+        try {
+            RowIterator it = qr.getRows();
+            assertTrue(it.hasNext());
+            while (it.hasNext()) {
+                Row r = it.nextRow();
+                for (int i = 0; i < qr.getColumnNames().length; i++) {
+                    String columnName = qr.getColumnNames()[i];
+                    String v = r.getValue(columnName).getString();
+                    JsonObject json = JsonObject.fromJson(v, true);
+                    for (int j = 0; j < NUM_LABELS; j++) {
+                        String n = json.getProperties().get("foo" + i + "x" + j);
+                        assertNotNull(n);
+                    }
+                }
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            throw e;
+        }
+    }
+    
+    private Tree createIndex(Tree index, String name) throws RepositoryException {
+        LuceneIndexDefinitionBuilder idxBuilder = new LuceneIndexDefinitionBuilder();
+        PropertyRule pr = idxBuilder.noAsync()
+                .indexRule("nt:base")
+                .property("cons").propertyIndex();
+        for (int i = 0; i < FACET_COUNT; i++) {
+            pr.property("foo" + i).propertyIndex().getBuilderTree().setProperty(PROP_FACETS, true);
+        }
+        Tree facetConfig = idxBuilder.getBuilderTree().addChild(FACET_PROP);
+        facetConfig.setProperty("jcr:primaryType", "nt:unstructured", Type.NAME);
+        facetConfig.setProperty("secure", "statistical");
+        facetConfig.setProperty("topChildren", "100");
+        Tree idxTree = index.getChild("oak:index").addChild(name);
+        idxBuilder.build(idxTree);
+        return idxTree;
+    }
+    
+}
diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelperTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelperTest.java
index ee9a38d..ce4894d 100644
--- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelperTest.java
+++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/FacetHelperTest.java
@@ -23,7 +23,6 @@
 import org.junit.Test;
 
 import static org.apache.jackrabbit.oak.InitialContentHelper.INITIAL_CONTENT;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
 /**
@@ -39,17 +38,4 @@
         assertNotNull(facetsConfig);
     }
 
-    @Test
-    public void testParseFacetField() throws Exception {
-        String field = FacetHelper.parseFacetField("rep:facet(text)");
-        assertNotNull(field);
-        assertEquals("text", field);
-        field = FacetHelper.parseFacetField("rep:facet(jcr:title)");
-        assertNotNull(field);
-        assertEquals("jcr:title", field);
-        field = FacetHelper.parseFacetField("rep:facet(jcr:primaryType)");
-        assertNotNull(field);
-        assertEquals("jcr:primaryType", field);
-
-    }
 }
\ No newline at end of file
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/IndexImporterSupportBase.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/IndexImporterSupportBase.java
index 7e57e29..2b70365 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/IndexImporterSupportBase.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/IndexImporterSupportBase.java
@@ -40,7 +40,7 @@
     }
 
     public void importIndex(File importDir) throws IOException, CommitFailedException {
-        IndexImporter importer = new IndexImporter(nodeStore, importDir, createIndexEditorProvider(), createLock());
+        IndexImporter importer = new IndexImporter(nodeStore, importDir, createIndexEditorProvider(), createLock(), indexHelper.getStatisticsProvider());
         addImportProviders(importer);
         importer.importIndex();
     }
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/DocumentStoreIndexerBase.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/DocumentStoreIndexerBase.java
index 40824bf..d180252 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/DocumentStoreIndexerBase.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/DocumentStoreIndexerBase.java
@@ -45,6 +45,7 @@
 import org.apache.jackrabbit.oak.plugins.index.IndexConstants;
 import org.apache.jackrabbit.oak.plugins.index.IndexUpdateCallback;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
 import org.apache.jackrabbit.oak.plugins.index.NodeTraversalCallback;
 import org.apache.jackrabbit.oak.plugins.index.progress.IndexingProgressReporter;
 import org.apache.jackrabbit.oak.plugins.index.progress.MetricRateEstimator;
@@ -81,6 +82,11 @@
 import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.TYPE_PROPERTY_NAME;
 
 public abstract class DocumentStoreIndexerBase implements Closeable {
+    public static final String INDEXER_METRICS_PREFIX = "oak_indexer_";
+    public static final String METRIC_INDEXING_DURATION_SECONDS = INDEXER_METRICS_PREFIX + "indexing_duration_seconds";
+    public static final String METRIC_MERGE_NODE_STORE_DURATION_SECONDS = INDEXER_METRICS_PREFIX + "merge_node_store_duration_seconds";
+    public static final String METRIC_FULL_INDEX_CREATION_DURATION_SECONDS = INDEXER_METRICS_PREFIX + "full_index_creation_duration_seconds";
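+    // The durations above are logged in the [TASK:...:END] metrics lines and are also published
+    // once per run as counters via MetricsUtils.setCounterOnce.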
+
     private final Logger log = LoggerFactory.getLogger(getClass());
     private final Logger traversalLog = LoggerFactory.getLogger(DocumentStoreIndexerBase.class.getName() + ".traversal");
     protected final Closer closer = Closer.create();
@@ -174,7 +180,8 @@
                         .withMongoDatabase(getMongoDatabase())
                         .withNodeStateEntryTraverserFactory(new MongoNodeStateEntryTraverserFactory(rootDocumentState.getRootRevision(),
                                 nodeStore, getMongoDocumentStore(), traversalLog))
-                        .withCheckpoint(indexerSupport.getCheckpoint());
+                        .withCheckpoint(indexerSupport.getCheckpoint())
+                        .withStatisticsProvider(indexHelper.getStatisticsProvider());
 
                 for (File dir : previousDownloadDirs) {
                     builder.addExistingDataDumpDir(dir);
@@ -298,25 +305,32 @@
 
         progressReporter.reindexingTraversalEnd();
         progressReporter.logReport();
-        log.info("Completed the indexing in {}", indexerWatch);
+        long indexingDurationSeconds = indexerWatch.elapsed(TimeUnit.SECONDS);
+        log.info("Completed the indexing in {}", FormattingUtils.formatToSeconds(indexingDurationSeconds));
         log.info("[TASK:INDEXING:END] Metrics: {}", MetricsFormatter.newBuilder()
-                .add("duration", FormattingUtils.formatToSeconds(indexerWatch))
-                .add("durationSeconds", indexerWatch.elapsed(TimeUnit.SECONDS))
+                .add("duration", FormattingUtils.formatToSeconds(indexingDurationSeconds))
+                .add("durationSeconds", indexingDurationSeconds)
                 .build());
+        MetricsUtils.setCounterOnce(indexHelper.getStatisticsProvider(), METRIC_INDEXING_DURATION_SECONDS, indexingDurationSeconds);
 
         log.info("[TASK:MERGE_NODE_STORE:START] Starting merge node store");
         Stopwatch mergeNodeStoreWatch = Stopwatch.createStarted();
         copyOnWriteStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        long mergeNodeStoreDurationSeconds = mergeNodeStoreWatch.elapsed(TimeUnit.SECONDS);
         log.info("[TASK:MERGE_NODE_STORE:END] Metrics: {}", MetricsFormatter.newBuilder()
-                .add("duration", FormattingUtils.formatToSeconds(mergeNodeStoreWatch))
-                .add("durationSeconds", mergeNodeStoreWatch.elapsed(TimeUnit.SECONDS))
+                .add("duration", FormattingUtils.formatToSeconds(mergeNodeStoreDurationSeconds))
+                .add("durationSeconds", mergeNodeStoreDurationSeconds)
                 .build());
+        MetricsUtils.setCounterOnce(indexHelper.getStatisticsProvider(), METRIC_MERGE_NODE_STORE_DURATION_SECONDS, mergeNodeStoreDurationSeconds);
 
         indexerSupport.postIndexWork(copyOnWriteStore);
+
+        long fullIndexCreationDurationSeconds = indexJobWatch.elapsed(TimeUnit.SECONDS);
         log.info("[TASK:FULL_INDEX_CREATION:END] Metrics {}", MetricsFormatter.newBuilder()
-                .add("duration", FormattingUtils.formatToSeconds(indexJobWatch))
-                .add("durationSeconds", indexJobWatch.elapsed(TimeUnit.SECONDS))
+                .add("duration", FormattingUtils.formatToSeconds(fullIndexCreationDurationSeconds))
+                .add("durationSeconds", fullIndexCreationDurationSeconds)
                 .build());
+        MetricsUtils.setCounterOnce(indexHelper.getStatisticsProvider(), METRIC_FULL_INDEX_CREATION_DURATION_SECONDS, fullIndexCreationDurationSeconds);
     }
 
     private void indexParallel(List<FlatFileStore> storeList, CompositeIndexer indexer, IndexingProgressReporter progressReporter)
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileNodeStoreBuilder.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileNodeStoreBuilder.java
index efc9490..fbe4d1b 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileNodeStoreBuilder.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/FlatFileNodeStoreBuilder.java
@@ -40,6 +40,7 @@
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.filter.PathFilter;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -141,6 +142,7 @@
     private MongoDatabase mongoDatabase = null;
     private Set<IndexDefinition> indexDefinitions = null;
     private String checkpoint;
+    private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP;
 
     public enum SortStrategyType {
         /**
@@ -240,6 +242,11 @@
         return this;
     }
 
+    public FlatFileNodeStoreBuilder withStatisticsProvider(StatisticsProvider statisticsProvider) {
+        this.statisticsProvider = statisticsProvider;
+        return this;
+    }
+
     public FlatFileStore build() throws IOException, CompositeException {
         logFlags();
         entryWriter = new NodeStateEntryWriter(blobStore);
@@ -374,7 +381,7 @@
                 log.info("Using PipelinedStrategy");
                 List<PathFilter> pathFilters = indexDefinitions.stream().map(IndexDefinition::getPathFilter).collect(Collectors.toList());
                 return new PipelinedStrategy(mongoDocumentStore, mongoDatabase, nodeStore, rootRevision,
-                        preferredPathElements, blobStore, dir, algorithm, pathPredicate, pathFilters, checkpoint);
+                        preferredPathElements, blobStore, dir, algorithm, pathPredicate, pathFilters, checkpoint, statisticsProvider);
 
         }
         throw new IllegalStateException("Not a valid sort strategy value " + sortStrategyType);
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/NodeStateEntryWriter.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/NodeStateEntryWriter.java
index d5bfe31..48cfbcc 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/NodeStateEntryWriter.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/NodeStateEntryWriter.java
@@ -28,12 +28,9 @@
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 
-import java.io.IOException;
-import java.io.Writer;
+import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
-import java.util.stream.Stream;
-import java.util.stream.StreamSupport;
 
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkState;
 
@@ -67,16 +64,6 @@
         return path + DELIMITER + nodeStateAsJson;
     }
 
-    public void writeTo(Writer writer, NodeStateEntry nse) throws IOException {
-        writeTo(writer, nse.getPath(), asJson(nse.getNodeState()));
-    }
-
-    public void writeTo(Writer writer, String path, String value) throws IOException {
-        writer.write(path);
-        writer.write(DELIMITER);
-        writer.write(value);
-    }
-
     public String toString(List<String> pathElements, String nodeStateAsJson) {
         int pathStringSize = pathElements.stream().mapToInt(String::length).sum();
         StringBuilder sb = new StringBuilder(nodeStateAsJson.length() + pathStringSize + pathElements.size() + 1);
@@ -90,18 +77,20 @@
         if (SORTED_PROPERTIES) {
             return asSortedJson(nodeState);
         }
-        return asJson(StreamSupport.stream(nodeState.getProperties().spliterator(), false));
+        return asJson(nodeState.getProperties());
     }
 
     String asSortedJson(NodeState nodeState) {
-        return asJson(StreamSupport.stream(nodeState.getProperties().spliterator(), false)
-                .sorted(Comparator.comparing(PropertyState::getName)));
+        List<PropertyState> properties = new ArrayList<>();
+        nodeState.getProperties().forEach(properties::add);
+        properties.sort(Comparator.comparing(PropertyState::getName));
+        return asJson(properties);
     }
 
-    private String asJson(Stream<? extends PropertyState> stream) {
+    private String asJson(Iterable<? extends PropertyState> properties) {
         jw.resetWriter();
         jw.object();
-        stream.forEach(ps -> {
+        properties.forEach(ps -> {
             String name = ps.getName();
             if (include(name)) {
                 jw.key(name);
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatch.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatch.java
index 2c68f1e..bfddbd4 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatch.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatch.java
@@ -18,12 +18,25 @@
  */
 package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
 
+import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
+import java.nio.charset.StandardCharsets;
 
 public class NodeStateEntryBatch {
+
+    public static class BufferFullException extends RuntimeException {
+        public BufferFullException(String message) {
+            super(message);
+        }
+
+        public BufferFullException(String message, Throwable cause) {
+            super(message, cause);
+        }
+    }
+
     // Must be large enough to hold a full node state entry
     static final int MIN_BUFFER_SIZE = 256 * 1024;
+
     public static NodeStateEntryBatch createNodeStateEntryBatch(int bufferSizeBytes, int maxNumEntries) {
         if (bufferSizeBytes < MIN_BUFFER_SIZE) {
             throw new IllegalArgumentException("Buffer size must be at least " + MIN_BUFFER_SIZE + " bytes");
@@ -36,43 +49,46 @@
     }
 
     private final ByteBuffer buffer;
-    private final ArrayList<SortKey> sortBuffer;
     private final int maxEntries;
+    private int numberOfEntries = 0;
+    private int sizeOfEntriesBytes = 0;
 
     public NodeStateEntryBatch(ByteBuffer buffer, int maxEntries) {
         this.buffer = buffer;
         this.maxEntries = maxEntries;
-        this.sortBuffer = new ArrayList<>(maxEntries);
     }
 
-    public ArrayList<SortKey> getSortBuffer() {
-        return sortBuffer;
+    public int addEntry(String path, byte[] entryData) throws BufferFullException {
+        if (numberOfEntries == maxEntries) {
+            throw new BufferFullException("Sort buffer size is full, reached max entries: " + numberOfEntries);
+        }
+        int bufferPos = buffer.position();
+        byte[] pathBytes = path.getBytes(StandardCharsets.UTF_8);
+        int totalSize = 4 + pathBytes.length + 4 + entryData.length;
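+        // 4 bytes for each of the two length prefixes (path and entry) plus the payloads themselves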
+        try {
+            // Each nse is written as: "len(path)|path|len(json)|json"
+            buffer.putInt(pathBytes.length);
+            buffer.put(pathBytes);
+            buffer.putInt(entryData.length);
+            buffer.put(entryData);
+            numberOfEntries++;
+            sizeOfEntriesBytes += totalSize;
+            return totalSize;
+        } catch (BufferOverflowException e) {
+            buffer.position(bufferPos);
+            throw new BufferFullException("while adding entry " + path + " of size: " + totalSize, e);
+        }
     }
 
     public ByteBuffer getBuffer() {
         return buffer;
     }
 
-    public void addEntry(String path, byte[] entryData) {
-        if (numberOfEntries() == maxEntries) {
-            throw new IllegalStateException("Sort buffer size exceeded max entries: " + sortBuffer.size() + " > " + maxEntries);
-        }
-        int bufferPos = buffer.position();
-        buffer.putInt(entryData.length);
-        buffer.put(entryData);
-        String[] key = SortKey.genSortKeyPathElements(path);
-        sortBuffer.add(new SortKey(key, bufferPos));
-    }
-
     public boolean isAtMaxEntries() {
-        if (sortBuffer.size() > maxEntries) {
-            throw new AssertionError("Sort buffer size exceeded max entries: " + sortBuffer.size() + " > " + maxEntries);
+        if (numberOfEntries > maxEntries) {
+            throw new AssertionError("Sort buffer size exceeded max entries: " + numberOfEntries + " > " + maxEntries);
         }
-        return sortBuffer.size() == maxEntries;
-    }
-
-    public boolean hasSpaceForEntry(byte[] entryData) {
-        return !isAtMaxEntries() && entryData.length + 4 <= buffer.remaining();
+        return numberOfEntries == maxEntries;
     }
 
     public void flip() {
@@ -81,18 +97,19 @@
 
     public void reset() {
         buffer.clear();
-        sortBuffer.clear();
+        numberOfEntries = 0;
+        sizeOfEntriesBytes = 0;
     }
 
-    public int sizeOfEntries() {
-        return buffer.position();
+    public int sizeOfEntriesBytes() {
+        return sizeOfEntriesBytes;
     }
 
     public int numberOfEntries() {
-        return sortBuffer.size();
+        return numberOfEntries;
     }
 
     public int capacity() {
         return buffer.capacity();
     }
-}
\ No newline at end of file
+}
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTask.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTask.java
index d14fc0f..bf4083e 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTask.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTask.java
@@ -27,12 +27,14 @@
 import org.apache.jackrabbit.oak.index.indexer.document.indexstore.IndexStoreUtils;
 import org.apache.jackrabbit.oak.plugins.index.FormattingUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.BufferedOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -47,6 +49,10 @@
 import java.util.function.Function;
 import java.util.stream.Collectors;
 
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedStrategy.SENTINEL_SORTED_FILES_QUEUE;
 import static org.apache.jackrabbit.oak.index.indexer.document.indexstore.IndexStoreUtils.getSortedStoreFileName;
 
@@ -150,6 +156,7 @@
     private final Comparator<NodeStateHolder> comparator;
     private final Compression algorithm;
     private final BlockingQueue<Path> sortedFilesQueue;
+    private final StatisticsProvider statisticsProvider;
     private final PriorityQueue<PathAndSize> sortedFiles = new PriorityQueue<>();
     private final AtomicBoolean stopEagerMerging = new AtomicBoolean(false);
     private final int mergeTriggerThreshold;
@@ -163,11 +170,13 @@
     public PipelinedMergeSortTask(Path storeDir,
                                   PathElementComparator pathComparator,
                                   Compression algorithm,
-                                  BlockingQueue<Path> sortedFilesQueue) {
+                                  BlockingQueue<Path> sortedFilesQueue,
+                                  StatisticsProvider statisticsProvider) {
         this.storeDir = storeDir;
         this.comparator = (e1, e2) -> pathComparator.compare(e1.getPathElements(), e2.getPathElements());
         this.algorithm = algorithm;
         this.sortedFilesQueue = sortedFilesQueue;
+        this.statisticsProvider = statisticsProvider;
 
         this.mergeTriggerThreshold = ConfigHelper.getSystemPropertyAsInt(OAK_INDEXER_PIPELINED_EAGER_MERGE_TRIGGER_THRESHOLD, DEFAULT_OAK_INDEXER_PIPELINED_EAGER_MERGE_TRIGGER_THRESHOLD);
         Preconditions.checkArgument(mergeTriggerThreshold >= 16,
@@ -210,15 +219,20 @@
 
                     LOG.info("Final merge completed in {}. Created file: {}", FormattingUtils.formatToSeconds(w), flatFileStore.toAbsolutePath());
                     long ffsSizeBytes = Files.size(flatFileStore);
+                    long durationSeconds = w.elapsed(TimeUnit.SECONDS);
                     String metrics = MetricsFormatter.newBuilder()
                             .add("duration", FormattingUtils.formatToSeconds(w))
-                            .add("durationSeconds", w.elapsed(TimeUnit.SECONDS))
+                            .add("durationSeconds", durationSeconds)
                             .add("filesMerged", sortedFiles.size())
                             .add("ffsSizeBytes", ffsSizeBytes)
                             .add("ffsSize", IOUtils.humanReadableByteCountBin(ffsSizeBytes))
                             .build();
 
                     LOG.info("[TASK:{}:END] Metrics: {}", THREAD_NAME.toUpperCase(Locale.ROOT), metrics);
+                    MetricsUtils.setCounterOnce(statisticsProvider, OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT, intermediateFilesCount);
+                    MetricsUtils.setCounterOnce(statisticsProvider, OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS, eagerMergeRuns);
+                    MetricsUtils.setCounterOnce(statisticsProvider, OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT, sortedFiles.size());
+                    MetricsUtils.setCounterOnce(statisticsProvider, OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME, durationSeconds);
                     return new Result(flatFileStore, intermediateFilesCount, sortedFiles.size(), eagerMergeRuns);
 
                 } else {
@@ -314,7 +328,7 @@
 
     private Path sortStoreFile(List<Path> sortedFilesBatch) throws IOException {
         Path sortedFile = storeDir.resolve(getSortedStoreFileName(algorithm));
-        try (BufferedOutputStream writer = IndexStoreUtils.createOutputStream(sortedFile, algorithm)) {
+        try (OutputStream writer = IndexStoreUtils.createOutputStream(sortedFile, algorithm)) {
             Function<byte[], NodeStateHolder> byteArrayToType = new NodeStateHolderFactory();
             Function<NodeStateHolder, byte[]> typeToByteArray = holder -> holder == null ? null : holder.getLine();
             ExternalSortByteArray.mergeSortedFilesBinary(sortedFilesBatch,
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMetrics.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMetrics.java
new file mode 100644
index 0000000..f44f79a
--- /dev/null
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMetrics.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
+
+public final class PipelinedMetrics {
+    public static final String METRIC_NAME_PREFIX = "oak_indexer_pipelined_";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_DOWNLOADED = METRIC_NAME_PREFIX + "documentsDownloaded";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_TRAVERSED = METRIC_NAME_PREFIX + "documentsTraversed";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_SPLIT = METRIC_NAME_PREFIX + "documentsRejectedSplit";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_ACCEPTED = METRIC_NAME_PREFIX + "documentsAccepted";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED = METRIC_NAME_PREFIX + "documentsRejected";
+    public static final String OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_EMPTY_NODE_STATE = METRIC_NAME_PREFIX + "documentsRejectedEmptyNodeState";
+    public static final String OAK_INDEXER_PIPELINED_ENTRIES_TRAVERSED = METRIC_NAME_PREFIX + "entriesTraversed";
+    public static final String OAK_INDEXER_PIPELINED_ENTRIES_ACCEPTED = METRIC_NAME_PREFIX + "entriesAccepted";
+    public static final String OAK_INDEXER_PIPELINED_ENTRIES_REJECTED = METRIC_NAME_PREFIX + "entriesRejected";
+    public static final String OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_HIDDEN_PATHS = METRIC_NAME_PREFIX + "entriesRejectedHiddenPaths";
+    public static final String OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_PATH_FILTERED = METRIC_NAME_PREFIX + "entriesRejectedPathFiltered";
+    public static final String OAK_INDEXER_PIPELINED_EXTRACTED_ENTRIES_TOTAL_SIZE = METRIC_NAME_PREFIX + "extractedEntriesTotalSize";
+    public static final String OAK_INDEXER_PIPELINED_MONGO_DOWNLOAD_ENQUEUE_DELAY_PERCENTAGE = METRIC_NAME_PREFIX + "mongoDownloadEnqueueDelayPercentage";
+    public static final String OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT = METRIC_NAME_PREFIX + "mergeSortIntermediateFilesCount";
+    public static final String OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS = METRIC_NAME_PREFIX + "mergeSortEagerMergesRuns";
+    public static final String OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT = METRIC_NAME_PREFIX + "mergeSortFinalMergeFilesCount";
+    public static final String OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME = METRIC_NAME_PREFIX + "mergeSortFinalMergeTime";
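+
+    // These metric names are published once per indexing run via MetricsUtils.setCounterOnce,
+    // for example by the Mongo download and merge-sort tasks in this patch.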
+
+    private PipelinedMetrics() {
+    }
+
+}
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTask.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTask.java
index 13eafcb..6c16faf 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTask.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTask.java
@@ -38,7 +38,9 @@
 import org.apache.jackrabbit.oak.plugins.document.util.Utils;
 import org.apache.jackrabbit.oak.plugins.index.FormattingUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
 import org.apache.jackrabbit.oak.spi.filter.PathFilter;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.bson.BsonDocument;
 import org.bson.codecs.configuration.CodecRegistries;
 import org.bson.codecs.configuration.CodecRegistry;
@@ -128,6 +130,7 @@
     private final ReadPreference readPreference;
     private final Stopwatch downloadStartWatch = Stopwatch.createUnstarted();
     private final int maxBatchSizeBytes;
+    private final StatisticsProvider statisticsProvider;
 
     private long totalEnqueueWaitTimeMillis = 0;
     private Instant lastDelayedEnqueueWarningMessageLoggedTimestamp = Instant.now();
@@ -141,7 +144,9 @@
                                       int maxBatchSizeBytes,
                                       int maxBatchNumberOfDocuments,
                                       BlockingQueue<NodeDocument[]> queue,
-                                      List<PathFilter> pathFilters) {
+                                      List<PathFilter> pathFilters,
+                                      StatisticsProvider statisticsProvider) {
+        this.statisticsProvider = statisticsProvider;
         NodeDocumentCodecProvider nodeDocumentCodecProvider = new NodeDocumentCodecProvider(mongoDocStore, Collection.NODES);
         CodecRegistry nodeDocumentCodecRegistry = CodecRegistries.fromRegistries(
                 CodecRegistries.fromProviders(nodeDocumentCodecProvider),
@@ -191,15 +196,20 @@
             } else {
                 downloadWithNaturalOrdering();
             }
-            String enqueueingDelayPercentage = String.format("%1.2f", (100.0 * totalEnqueueWaitTimeMillis) / downloadStartWatch.elapsed(TimeUnit.MILLISECONDS));
+            long durationMillis = downloadStartWatch.elapsed(TimeUnit.MILLISECONDS);
+            String enqueueingDelayPercentage = PipelinedUtils.formatAsPercentage(totalEnqueueWaitTimeMillis, durationMillis);
             String metrics = MetricsFormatter.newBuilder()
                     .add("duration", FormattingUtils.formatToSeconds(downloadStartWatch))
-                    .add("durationSeconds", downloadStartWatch.elapsed(TimeUnit.SECONDS))
+                    .add("durationSeconds", durationMillis/1000)
                     .add("documentsDownloaded", documentsRead)
-                    .add("enqueueingDelayMs", totalEnqueueWaitTimeMillis)
+                    .add("enqueueingDelayMillis", totalEnqueueWaitTimeMillis)
                     .add("enqueueingDelayPercentage", enqueueingDelayPercentage)
                     .build();
 
+            MetricsUtils.setCounterOnce(statisticsProvider,
+                    PipelinedMetrics.OAK_INDEXER_PIPELINED_MONGO_DOWNLOAD_ENQUEUE_DELAY_PERCENTAGE,
+                    PipelinedUtils.toPercentage(totalEnqueueWaitTimeMillis, durationMillis)
+            );
             LOG.info("[TASK:{}:END] Metrics: {}", THREAD_NAME.toUpperCase(Locale.ROOT), metrics);
             return new Result(documentsRead);
         } catch (InterruptedException t) {
@@ -216,7 +226,7 @@
     private void reportProgress(String id) {
         if (this.documentsRead % 10000 == 0) {
             double rate = ((double) this.documentsRead) / downloadStartWatch.elapsed(TimeUnit.SECONDS);
-            String formattedRate = String.format("%1.2f nodes/s, %1.2f nodes/hr", rate, rate * 3600);
+            String formattedRate = String.format(Locale.ROOT, "%1.2f nodes/s, %1.2f nodes/hr", rate, rate * 3600);
             LOG.info("Dumping from NSET Traversed #{} {} [{}] (Elapsed {})",
                     this.documentsRead, id, formattedRate, FormattingUtils.formatToSeconds(downloadStartWatch));
         }
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTask.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTask.java
index ab5fca9..c53c72a 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTask.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTask.java
@@ -21,13 +21,15 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.guava.common.base.Stopwatch;
 import org.apache.jackrabbit.oak.commons.Compression;
+import org.apache.jackrabbit.oak.index.indexer.document.indexstore.IndexStoreUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.BufferedOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -38,7 +40,7 @@
 
 import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCountBin;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedStrategy.SENTINEL_NSE_BUFFER;
-import static org.apache.jackrabbit.oak.index.indexer.document.indexstore.IndexStoreUtils.createOutputStream;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedUtils.formatAsPercentage;
 
 /**
  * Receives batches of node state entries, sorts them in memory, and finally writes them to a file.
@@ -66,9 +68,12 @@
     private final BlockingQueue<NodeStateEntryBatch> nonEmptyBuffersQueue;
     private final BlockingQueue<Path> sortedFilesQueue;
     private final Path sortWorkDir;
-    private final byte[] copyBuffer = new byte[4096];
+    private final ArrayList<SortKey> sortBuffer = new ArrayList<>(32 * 1024);
     private long entriesProcessed = 0;
     private long batchesProcessed = 0;
+    private long timeCreatingSortArrayMillis = 0;
+    private long timeSortingMillis = 0;
+    private long timeWritingMillis = 0;
 
     public PipelinedSortBatchTask(Path storeDir,
                                   PathElementComparator pathComparator,
@@ -86,6 +91,7 @@
 
     @Override
     public Result call() throws Exception {
+        Stopwatch taskStartTime = Stopwatch.createStarted();
         String originalName = Thread.currentThread().getName();
         Thread.currentThread().setName(THREAD_NAME);
         try {
@@ -94,9 +100,22 @@
                 LOG.info("Waiting for next batch");
                 NodeStateEntryBatch nseBuffer = nonEmptyBuffersQueue.take();
                 if (nseBuffer == SENTINEL_NSE_BUFFER) {
+                    long totalTimeMillis = taskStartTime.elapsed().toMillis();
+                    sortBuffer.clear(); // It should be empty already
+                    sortBuffer.trimToSize();  // Release the internal array, which may be very large (several million entries)
+                    String timeCreatingSortArrayPercentage = formatAsPercentage(timeCreatingSortArrayMillis, totalTimeMillis);
+                    String timeSortingPercentage = formatAsPercentage(timeSortingMillis, totalTimeMillis);
+                    String timeWritingPercentage = formatAsPercentage(timeWritingMillis, totalTimeMillis);
                     String metrics = MetricsFormatter.newBuilder()
                             .add("batchesProcessed", batchesProcessed)
                             .add("entriesProcessed", entriesProcessed)
+                            .add("timeCreatingSortArrayMillis", timeCreatingSortArrayMillis)
+                            .add("timeCreatingSortArrayPercentage", timeCreatingSortArrayPercentage)
+                            .add("timeSortingMillis", timeSortingMillis)
+                            .add("timeSortingPercentage", timeSortingPercentage)
+                            .add("timeWritingMillis", timeWritingMillis)
+                            .add("timeWritingPercentage", timeWritingPercentage)
+                            .add("totalTimeSeconds", totalTimeMillis / 1000)
                             .build();
                     LOG.info("[TASK:{}:END] Metrics: {}", THREAD_NAME.toUpperCase(Locale.ROOT), metrics);
                     return new Result(entriesProcessed);
@@ -116,48 +135,81 @@
         }
     }
 
+    private void buildSortArray(NodeStateEntryBatch nseb) {
+        Stopwatch startTime = Stopwatch.createStarted();
+        ByteBuffer buffer = nseb.getBuffer();
+        int totalPathSize = 0;
+        while (buffer.hasRemaining()) {
+            int positionInBuffer = buffer.position();
+            // Read the next key from the buffer
+            int pathLength = buffer.getInt();
+            totalPathSize += pathLength;
+            // Create the String directly from the buffer without creating an intermediate byte[]
+            String path = new String(buffer.array(), buffer.position(), pathLength, StandardCharsets.UTF_8);
+            buffer.position(buffer.position() + pathLength);
+            // Skip the json
+            int entryLength = buffer.getInt();
+            buffer.position(buffer.position() + entryLength);
+            String[] pathSegments = SortKey.genSortKeyPathElements(path);
+            sortBuffer.add(new SortKey(pathSegments, positionInBuffer));
+        }
+        timeCreatingSortArrayMillis += startTime.elapsed().toMillis();
+        LOG.info("Built sort array in {}. Entries: {}, Total size of path strings: {}",
+                startTime, sortBuffer.size(), humanReadableByteCountBin(totalPathSize));
+    }
+
     private void sortAndSaveBatch(NodeStateEntryBatch nseb) throws Exception {
-        ArrayList<SortKey> sortBuffer = nseb.getSortBuffer();
         ByteBuffer buffer = nseb.getBuffer();
         LOG.info("Going to sort batch in memory. Entries: {}, Size: {}",
-                sortBuffer.size(), humanReadableByteCountBin(buffer.remaining()));
+                nseb.numberOfEntries(), humanReadableByteCountBin(nseb.sizeOfEntriesBytes()));
+        sortBuffer.clear();
+        buildSortArray(nseb);
         if (sortBuffer.isEmpty()) {
             return;
         }
         Stopwatch sortClock = Stopwatch.createStarted();
         sortBuffer.sort(pathComparator);
+        timeSortingMillis += sortClock.elapsed().toMillis();
         LOG.info("Sorted batch in {}. Saving to disk", sortClock);
         Stopwatch saveClock = Stopwatch.createStarted();
         Path newtmpfile = Files.createTempFile(sortWorkDir, "sortInBatch", "flatfile");
         long textSize = 0;
         batchesProcessed++;
-        try (BufferedOutputStream writer = createOutputStream(newtmpfile, algorithm)) {
+        try (OutputStream os = IndexStoreUtils.createOutputStream(newtmpfile, algorithm)) {
             for (SortKey entry : sortBuffer) {
                 entriesProcessed++;
                 // Retrieve the entry from the buffer
                 int posInBuffer = entry.getBufferPos();
                 buffer.position(posInBuffer);
-                int entrySize = buffer.getInt();
 
-                // Write the entry to the file without creating intermediate byte[]
-                int bytesRemaining = entrySize;
-                while (bytesRemaining > 0) {
-                    int bytesRead = Math.min(copyBuffer.length, bytesRemaining);
-                    buffer.get(copyBuffer, 0, bytesRead);
-                    writer.write(copyBuffer, 0, bytesRead);
-                    bytesRemaining -= bytesRead;
-                }
-                writer.write(PipelinedStrategy.FLATFILESTORE_LINE_SEPARATOR);
-                textSize += entrySize + 1;
+                int pathSize = buffer.getInt();
+                copyField(os, buffer, pathSize);
+                os.write(PipelinedStrategy.FLATFILESTORE_DELIMITER);
+                int jsonSize = buffer.getInt();
+                copyField(os, buffer, jsonSize);
+                os.write(PipelinedStrategy.FLATFILESTORE_LINE_SEPARATOR);
+                textSize += pathSize + jsonSize + 2;
             }
         }
-        LOG.info("Stored batch of size {} (uncompressed {}) with {} entries in {}",
-                humanReadableByteCountBin(Files.size(newtmpfile)),
+        timeWritingMillis += saveClock.elapsed().toMillis();
+        long compressedSize = Files.size(newtmpfile);
+        LOG.info("Wrote batch of size {} (uncompressed {}) with {} entries in {} at {}",
+                humanReadableByteCountBin(compressedSize),
                 humanReadableByteCountBin(textSize),
-                sortBuffer.size(), saveClock);
+                sortBuffer.size(), saveClock,
+                PipelinedUtils.formatAsTransferSpeedMBs(compressedSize, saveClock.elapsed().toMillis())
+        );
+        // Free the memory taken by the entries in the buffer
+        sortBuffer.clear();
         sortedFilesQueue.put(newtmpfile);
     }
 
+    private void copyField(OutputStream writer, ByteBuffer buffer, int fieldSize) throws IOException {
+        // Write the entry to the file without creating intermediate byte[]
+        writer.write(buffer.array(), buffer.position(), fieldSize);
+        buffer.position(buffer.position() + fieldSize);
+    }
+
     private static Path createdSortWorkDir(Path storeDir) throws IOException {
         Path sortedFileDir = storeDir.resolve("sort-work-dir");
         FileUtils.forceMkdir(sortedFileDir.toFile());
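
For reference, a small sketch of the per-entry layout that buildSortArray and sortAndSaveBatch above assume inside a NodeStateEntryBatch buffer: [int pathLength][path bytes, UTF-8][int entryLength][JSON bytes, UTF-8], repeated until the buffer is exhausted. The helper class below is illustrative and not part of the patch; it only mirrors the decoding logic shown in the diff.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class EntryLayoutSketch {
    // Decodes one entry starting at the buffer's current position, mirroring buildSortArray above.
    // Assumes a heap-backed buffer, as used by the pipelined tasks.
    static String decodeOneEntry(ByteBuffer buffer) {
        int pathLength = buffer.getInt();
        String path = new String(buffer.array(), buffer.position(), pathLength, StandardCharsets.UTF_8);
        buffer.position(buffer.position() + pathLength);

        int jsonLength = buffer.getInt();
        String json = new String(buffer.array(), buffer.position(), jsonLength, StandardCharsets.UTF_8);
        buffer.position(buffer.position() + jsonLength);

        // This is also the line format written to the intermediate sorted files: path, '|', json, '\n'
        return path + '|' + json + '\n';
    }
}
```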
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedStrategy.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedStrategy.java
index b7f1a8d..14bf4e8 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedStrategy.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedStrategy.java
@@ -33,8 +33,10 @@
 import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
 import org.apache.jackrabbit.oak.plugins.index.FormattingUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.filter.PathFilter;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,6 +62,7 @@
 import java.util.function.Predicate;
 
 import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCountBin;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_DOWNLOADED;
 
 /**
  * Downloads the contents of the MongoDB repository dividing the tasks in a pipeline with the following stages:
@@ -128,13 +131,16 @@
     public static final String OAK_INDEXER_PIPELINED_WORKING_MEMORY_MB = "oak.indexer.pipelined.workingMemoryMB";
     // 0 means autodetect
     public static final int DEFAULT_OAK_INDEXER_PIPELINED_WORKING_MEMORY_MB = 0;
+    // Between 1 and 100
+    public static final String OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE = "oak.indexer.pipelined.sortBufferMemoryPercentage";
+    public static final int DEFAULT_OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE = 25;
 
     static final NodeDocument[] SENTINEL_MONGO_DOCUMENT = new NodeDocument[0];
     static final NodeStateEntryBatch SENTINEL_NSE_BUFFER = new NodeStateEntryBatch(ByteBuffer.allocate(0), 0);
     static final Path SENTINEL_SORTED_FILES_QUEUE = Paths.get("SENTINEL");
     static final Charset FLATFILESTORE_CHARSET = StandardCharsets.UTF_8;
     static final char FLATFILESTORE_LINE_SEPARATOR = '\n';
-
+    static final byte FLATFILESTORE_DELIMITER = '|';
     private static final Logger LOG = LoggerFactory.getLogger(PipelinedStrategy.class);
     // A MongoDB document is at most 16MB, so the buffer that holds node state entries must be at least that big
     private static final int MIN_MONGO_DOC_QUEUE_RESERVED_MEMORY_MB = 16;
@@ -210,21 +216,21 @@
     private final BlobStore blobStore;
     private final PathElementComparator pathComparator;
     private final List<PathFilter> pathFilters;
+    private final StatisticsProvider statisticsProvider;
     private final int numberOfTransformThreads;
     private final int mongoDocQueueSize;
     private final int mongoDocBatchMaxSizeMB;
     private final int mongoDocBatchMaxNumberOfDocuments;
     private final int nseBuffersCount;
-    private final int nseBufferMaxEntriesPerBuffer;
     private final int nseBuffersSizeBytes;
 
     private long nodeStateEntriesExtracted;
 
-
     /**
-     * @param pathPredicate Used by the transform stage to test if a node should be kept or discarded.
-     * @param pathFilters   If non-empty, the download stage will use these filters to try to create a query that downloads
-     *                      only the matching MongoDB documents.
+     * @param pathPredicate      Used by the transform stage to test if a node should be kept or discarded.
+     * @param pathFilters        If non-empty, the download stage will use these filters to try to create a query that downloads
+     *                           only the matching MongoDB documents.
+     * @param statisticsProvider Used to collect statistics about the indexing process.
      */
     public PipelinedStrategy(MongoDocumentStore documentStore,
                              MongoDatabase mongoDatabase,
@@ -236,7 +242,8 @@
                              Compression algorithm,
                              Predicate<String> pathPredicate,
                              List<PathFilter> pathFilters,
-                             String checkpoint) {
+                             String checkpoint,
+                             StatisticsProvider statisticsProvider) {
         super(storeDir, algorithm, pathPredicate, preferredPathElements, checkpoint);
         this.docStore = documentStore;
         this.mongoDatabase = mongoDatabase;
@@ -245,6 +252,7 @@
         this.blobStore = blobStore;
         this.pathComparator = new PathElementComparator(preferredPathElements);
         this.pathFilters = pathFilters;
+        this.statisticsProvider = statisticsProvider;
         Preconditions.checkState(documentStore.isReadOnly(), "Traverser can only be used with readOnly store");
 
         int mongoDocQueueReservedMemoryMB = ConfigHelper.getSystemPropertyAsInt(OAK_INDEXER_PIPELINED_MONGO_DOC_QUEUE_RESERVED_MEMORY_MB, DEFAULT_OAK_INDEXER_PIPELINED_MONGO_DOC_QUEUE_RESERVED_MEMORY_MB);
@@ -263,10 +271,9 @@
         Preconditions.checkArgument(numberOfTransformThreads > 0,
                 "Invalid value for property " + OAK_INDEXER_PIPELINED_TRANSFORM_THREADS + ": " + numberOfTransformThreads + ". Must be > 0");
 
-        // Derived values for transform <-> sort-save
-        int nseBuffersReservedMemoryMB = readNSEBuffersReservedMemory();
-
-        // Calculate values derived from the configuration settings
+        int sortBufferMemoryPercentage = ConfigHelper.getSystemPropertyAsInt(OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE, DEFAULT_OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE);
+        Preconditions.checkArgument(sortBufferMemoryPercentage > 0 && sortBufferMemoryPercentage <= 100,
+                "Invalid value for property " + OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE + ": " + sortBufferMemoryPercentage + ". Must be between 1 and 100");
 
         // mongo-dump  <-> transform threads
         Preconditions.checkArgument(mongoDocQueueReservedMemoryMB >= 8 * mongoDocBatchMaxSizeMB,
@@ -276,21 +283,19 @@
         );
         this.mongoDocQueueSize = mongoDocQueueReservedMemoryMB / mongoDocBatchMaxSizeMB;
 
-        // Transform threads <-> merge-sort
+        // Derived values for transform <-> sort-save
+        int nseWorkingMemoryMB = readNSEBuffersReservedMemory();
         this.nseBuffersCount = 1 + numberOfTransformThreads;
-
-        long nseBuffersReservedMemoryBytes = nseBuffersReservedMemoryMB * FileUtils.ONE_MB;
+        long nseWorkingMemoryBytes = (long) nseWorkingMemoryMB * FileUtils.ONE_MB;
         // The working memory is divided in the following regions:
         // - #transformThreads  NSE Binary buffers
-        // - 1x                 Metadata of NSE entries in Binary buffers, list of SortKeys
-        // A ByteBuffer can be at most Integer.MAX_VALUE bytes long
-        this.nseBuffersSizeBytes = limitToIntegerRange(nseBuffersReservedMemoryBytes / (nseBuffersCount + 1));
+        // - x1                 Memory reserved for the array created by the sort-batch thread with the keys of the entries
+        //                      in the batch that is being sorted
+        long memoryReservedForSortKeysArray = estimateMaxSizeOfSortKeyArray(nseWorkingMemoryBytes, nseBuffersCount, sortBufferMemoryPercentage);
+        long memoryReservedForBuffers = nseWorkingMemoryBytes - memoryReservedForSortKeysArray;
 
-        // Assuming 1 instance of SortKey takes around 256 bytes. We have #transformThreads + 1 regions of nseBufferSizeBytes.
-        // The extra region is for the SortKey instances. Below we compute the total number of SortKey instances that
-        // fit in the memory region reserved for them, assuming that each SortKey instance takes 256 bytes. Then we
-        // distribute equally these available entries among the nse buffers
-        this.nseBufferMaxEntriesPerBuffer = (this.nseBuffersSizeBytes / 256) / this.nseBuffersCount;
+        // A ByteBuffer can be at most Integer.MAX_VALUE bytes long
+        this.nseBuffersSizeBytes = limitToIntegerRange(memoryReservedForBuffers / nseBuffersCount);
 
         if (nseBuffersSizeBytes < MIN_ENTRY_BATCH_BUFFER_SIZE_MB * FileUtils.ONE_MB) {
             throw new IllegalArgumentException("Entry batch buffer size too small: " + nseBuffersSizeBytes +
@@ -304,11 +309,21 @@
                 mongoDocQueueReservedMemoryMB,
                 mongoDocBatchMaxSizeMB,
                 mongoDocQueueSize);
-        LOG.info("NodeStateEntryBuffers: [ workingMemory: {} MB, numberOfBuffers: {}, bufferSize: {}, maxEntriesPerBuffer: {} ]",
-                nseBuffersReservedMemoryMB,
+        LOG.info("NodeStateEntryBuffers: [ workingMemory: {} MB, numberOfBuffers: {}, bufferSize: {}, sortBufferReservedMemory: {} ]",
+                nseWorkingMemoryMB,
                 nseBuffersCount,
                 IOUtils.humanReadableByteCountBin(nseBuffersSizeBytes),
-                nseBufferMaxEntriesPerBuffer);
+                IOUtils.humanReadableByteCountBin(memoryReservedForSortKeysArray)
+        );
+    }
+
+    static long estimateMaxSizeOfSortKeyArray(long nseWorkingMemoryBytes, long nseBuffersCount, int sortBufferMemoryPercentage) {
+        // We reserve a percentage of the size of one buffer for the sort keys array. That is, we assume that,
+        // on average, the memory needed to store the SortKey of the path section of a line in the sort buffer
+        // is at most sortBufferMemoryPercentage of the total size of that line.
+        long approxNseBufferSize = limitToIntegerRange(nseWorkingMemoryBytes / nseBuffersCount);
+        return approxNseBufferSize * sortBufferMemoryPercentage / 100;
     }
 
     private int readNSEBuffersReservedMemory() {
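
As a worked example of the new memory split (the concrete numbers below are illustrative; only the 25% default comes from this patch): with 2048 MB of NSE working memory, 3 transform threads (so nseBuffersCount = 4) and the default sortBufferMemoryPercentage of 25, estimateMaxSizeOfSortKeyArray reserves (2048 MB / 4) * 25% = 128 MB for the SortKey array, leaving 1920 MB for the buffers, i.e. 480 MB per NSE buffer.

```java
// Illustrative numbers only; estimateMaxSizeOfSortKeyArray is package-private, so this sketch
// assumes it is called from the same package (as a unit test would).
long nseWorkingMemoryBytes = 2048L * 1024 * 1024;  // 2048 MB of NSE working memory
int nseBuffersCount = 1 + 3;                       // 1 + numberOfTransformThreads
int sortBufferMemoryPercentage = 25;               // DEFAULT_OAK_INDEXER_PIPELINED_SORT_BUFFER_MEMORY_PERCENTAGE

long sortKeysArrayBytes = PipelinedStrategy.estimateMaxSizeOfSortKeyArray(
        nseWorkingMemoryBytes, nseBuffersCount, sortBufferMemoryPercentage);          // 128 MB
long perBufferBytes = (nseWorkingMemoryBytes - sortKeysArrayBytes) / nseBuffersCount; // 480 MB
```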
@@ -383,26 +398,25 @@
 
             // Create empty buffers
             for (int i = 0; i < nseBuffersCount; i++) {
-                emptyBatchesQueue.add(NodeStateEntryBatch.createNodeStateEntryBatch(nseBuffersSizeBytes, nseBufferMaxEntriesPerBuffer));
+                // No limits on the number of entries, only on their total size. This might be revised later.
+                emptyBatchesQueue.add(NodeStateEntryBatch.createNodeStateEntryBatch(nseBuffersSizeBytes, Integer.MAX_VALUE));
             }
 
             LOG.info("[TASK:PIPELINED-DUMP:START] Starting to build FFS");
             Stopwatch start = Stopwatch.createStarted();
-            PipelinedMongoDownloadTask downloadTask = new PipelinedMongoDownloadTask(
+            ecs.submit(new PipelinedMongoDownloadTask(
                     mongoDatabase,
                     docStore,
                     (int) (mongoDocBatchMaxSizeMB * FileUtils.ONE_MB),
                     mongoDocBatchMaxNumberOfDocuments,
                     mongoDocQueue,
-                    pathFilters
-            );
-            ecs.submit(downloadTask);
-
-            Path flatFileStore = null;
+                    pathFilters,
+                    statisticsProvider
+            ));
 
             for (int i = 0; i < numberOfTransformThreads; i++) {
                 NodeStateEntryWriter entryWriter = new NodeStateEntryWriter(blobStore);
-                PipelinedTransformTask transformTask = new PipelinedTransformTask(
+                ecs.submit(new PipelinedTransformTask(
                         docStore,
                         documentNodeStore,
                         rootRevision,
@@ -412,20 +426,27 @@
                         emptyBatchesQueue,
                         nonEmptyBatchesQueue,
                         transformStageStatistics
-                );
-                ecs.submit(transformTask);
+                ));
             }
 
-            PipelinedSortBatchTask sortTask = new PipelinedSortBatchTask(
-                    this.getStoreDir().toPath(), pathComparator, this.getAlgorithm(), emptyBatchesQueue, nonEmptyBatchesQueue, sortedFilesQueue
-            );
-            ecs.submit(sortTask);
+            ecs.submit(new PipelinedSortBatchTask(
+                    this.getStoreDir().toPath(),
+                    pathComparator,
+                    this.getAlgorithm(),
+                    emptyBatchesQueue,
+                    nonEmptyBatchesQueue,
+                    sortedFilesQueue
+            ));
 
-            PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(this.getStoreDir().toPath(), pathComparator,
-                    this.getAlgorithm(), sortedFilesQueue);
+            PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(
+                    this.getStoreDir().toPath(),
+                    pathComparator,
+                    this.getAlgorithm(),
+                    sortedFilesQueue, statisticsProvider);
+
             ecs.submit(mergeSortTask);
 
-
+            Path flatFileStore = null;
             try {
                 LOG.info("Waiting for tasks to complete");
                 int tasksFinished = 0;
@@ -442,6 +463,7 @@
                                 mongoDocQueue.put(SENTINEL_MONGO_DOCUMENT);
                             }
                             mergeSortTask.stopEagerMerging();
+                            MetricsUtils.setCounterOnce(statisticsProvider, OAK_INDEXER_PIPELINED_DOCUMENTS_DOWNLOADED, downloadResult.getDocumentsDownloaded());
 
                         } else if (result instanceof PipelinedTransformTask.Result) {
                             PipelinedTransformTask.Result transformResult = (PipelinedTransformTask.Result) result;
@@ -455,13 +477,22 @@
                                 monitorFuture.cancel(false);
                                 // Terminate the sort thread.
                                 nonEmptyBatchesQueue.put(SENTINEL_NSE_BUFFER);
+                                transformStageStatistics.publishStatistics(statisticsProvider);
                             }
 
                         } else if (result instanceof PipelinedSortBatchTask.Result) {
                             PipelinedSortBatchTask.Result sortTaskResult = (PipelinedSortBatchTask.Result) result;
                             LOG.info("Sort batch task finished. Entries processed: {}", sortTaskResult.getTotalEntries());
-                            printStatistics(mongoDocQueue, emptyBatchesQueue, nonEmptyBatchesQueue, sortedFilesQueue, transformStageStatistics, true);
                             sortedFilesQueue.put(SENTINEL_SORTED_FILES_QUEUE);
+                            // The buffers between transform and merge sort tasks are no longer needed, so remove them
+                            // from the queues so they can be garbage collected.
+                            // These buffers can be very large, so this is important to avoid running out of memory in
+                            // the merge-sort phase
+                            if (!nonEmptyBatchesQueue.isEmpty()) {
+                                LOG.warn("nonEmptyBatchesQueue is not empty. Size: {}", nonEmptyBatchesQueue.size());
+                            }
+                            emptyBatchesQueue.clear();
+                            printStatistics(mongoDocQueue, emptyBatchesQueue, nonEmptyBatchesQueue, sortedFilesQueue, transformStageStatistics, true);
 
                         } else if (result instanceof PipelinedMergeSortTask.Result) {
                             PipelinedMergeSortTask.Result mergeSortedFilesTask = (PipelinedMergeSortTask.Result) result;
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedTransformTask.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedTransformTask.java
index 782a1d9..9d9f94f8 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedTransformTask.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedTransformTask.java
@@ -20,6 +20,7 @@
 
 import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.guava.common.base.Stopwatch;
+import org.apache.jackrabbit.oak.commons.IOUtils;
 import org.apache.jackrabbit.oak.index.indexer.document.NodeStateEntry;
 import org.apache.jackrabbit.oak.index.indexer.document.flatfile.NodeStateEntryWriter;
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeState;
@@ -36,8 +37,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Locale;
 import java.util.concurrent.ArrayBlockingQueue;
@@ -89,6 +89,7 @@
     private final TransformStageStatistics statistics;
     private final int threadId = threadIdGenerator.getAndIncrement();
     private long totalEnqueueDelayMillis = 0;
+    private long totalEmptyBatchQueueWaitTimeMillis = 0;
 
     public PipelinedTransformTask(MongoDocumentStore mongoStore,
                                   DocumentNodeStore documentNodeStore,
@@ -124,10 +125,6 @@
             long mongoObjectsProcessed = 0;
             LOG.debug("Waiting for an empty buffer");
             NodeStateEntryBatch nseBatch = emptyBatchesQueue.take();
-
-            // Used to serialize a node state entry before writing it to the buffer
-            ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
-            OutputStreamWriter writer = new OutputStreamWriter(baos, PipelinedStrategy.FLATFILESTORE_CHARSET);
             LOG.debug("Obtained an empty buffer. Starting to convert Mongo documents to node state entries");
 
             ArrayList<NodeStateEntry> nodeStateEntries = new ArrayList<>();
@@ -137,16 +134,20 @@
                 NodeDocument[] nodeDocumentBatch = mongoDocQueue.take();
                 totalDocumentQueueWaitTimeMillis += docQueueWaitStopwatch.elapsed(TimeUnit.MILLISECONDS);
                 if (nodeDocumentBatch == SENTINEL_MONGO_DOCUMENT) {
-                    String totalDocumentQueueWaitPercentage = String.format("%1.2f", (100.0 * totalDocumentQueueWaitTimeMillis) / taskStartWatch.elapsed(TimeUnit.MILLISECONDS));
-                    String totalEnqueueDelayPercentage = String.format("%1.2f", (100.0 * totalEnqueueDelayMillis) / taskStartWatch.elapsed(TimeUnit.MILLISECONDS));
+                    long totalDurationMillis = taskStartWatch.elapsed(TimeUnit.MILLISECONDS);
+                    String totalDocumentQueueWaitPercentage = PipelinedUtils.formatAsPercentage(totalDocumentQueueWaitTimeMillis, totalDurationMillis);
+                    String totalEnqueueDelayPercentage = PipelinedUtils.formatAsPercentage(totalEnqueueDelayMillis, totalDurationMillis);
+                    String totalEmptyBatchQueueWaitPercentage = PipelinedUtils.formatAsPercentage(totalEmptyBatchQueueWaitTimeMillis, totalDurationMillis);
                     String metrics = MetricsFormatter.newBuilder()
                             .add("duration", FormattingUtils.formatToSeconds(taskStartWatch))
-                            .add("durationSeconds", taskStartWatch.elapsed(TimeUnit.SECONDS))
+                            .add("durationSeconds", totalDurationMillis / 1000)
                             .add("nodeStateEntriesGenerated", totalEntryCount)
                             .add("enqueueDelayMillis", totalEnqueueDelayMillis)
                             .add("enqueueDelayPercentage", totalEnqueueDelayPercentage)
                             .add("documentQueueWaitMillis", totalDocumentQueueWaitTimeMillis)
                             .add("documentQueueWaitPercentage", totalDocumentQueueWaitPercentage)
+                            .add("totalEmptyBatchQueueWaitTimeMillis", totalEmptyBatchQueueWaitTimeMillis)
+                            .add("totalEmptyBatchQueueWaitPercentage", totalEmptyBatchQueueWaitPercentage)
                             .build();
                     LOG.info("[TASK:{}:END] Metrics: {}", threadName.toUpperCase(Locale.ROOT), metrics);
                     //Save the last batch
@@ -155,12 +156,12 @@
                     return new Result(threadId, totalEntryCount);
                 } else {
                     for (NodeDocument nodeDoc : nodeDocumentBatch) {
-                        statistics.incrementMongoDocumentsProcessed();
+                        statistics.incrementMongoDocumentsTraversed();
                         mongoObjectsProcessed++;
                         if (mongoObjectsProcessed % 50000 == 0) {
                             LOG.info("Mongo objects: {}, total entries: {}, current batch: {}, Size: {}/{} MB",
                                     mongoObjectsProcessed, totalEntryCount, nseBatch.numberOfEntries(),
-                                    nseBatch.sizeOfEntries() / FileUtils.ONE_MB,
+                                    nseBatch.sizeOfEntriesBytes() / FileUtils.ONE_MB,
                                     nseBatch.capacity() / FileUtils.ONE_MB
                             );
                         }
@@ -181,21 +182,25 @@
                                         statistics.incrementEntriesAccepted();
                                         totalEntryCount++;
                                         // Serialize entry
-                                        entryWriter.writeTo(writer, nse);
-                                        writer.flush();
-                                        byte[] entryData = baos.toByteArray();
-                                        baos.reset();
-                                        statistics.incrementTotalExtractedEntriesSize(entryData.length);
-                                        if (!nseBatch.hasSpaceForEntry(entryData)) {
+                                        byte[] jsonBytes = entryWriter.asJson(nse.getNodeState()).getBytes(StandardCharsets.UTF_8);
+                                        int entrySize;
+                                        try {
+                                            entrySize = nseBatch.addEntry(path, jsonBytes);
+                                        } catch (NodeStateEntryBatch.BufferFullException e) {
                                             LOG.info("Buffer full, passing buffer to sort task. Total entries: {}, entries in buffer {}, buffer size: {}",
-                                                    totalEntryCount, nseBatch.numberOfEntries(), nseBatch.sizeOfEntries());
+                                                    totalEntryCount, nseBatch.numberOfEntries(), IOUtils.humanReadableByteCountBin(nseBatch.sizeOfEntriesBytes()));
                                             nseBatch.flip();
                                             tryEnqueue(nseBatch);
                                             // Get an empty buffer
+                                            Stopwatch emptyBatchesQueueStopwatch = Stopwatch.createStarted();
                                             nseBatch = emptyBatchesQueue.take();
+                                            totalEmptyBatchQueueWaitTimeMillis += emptyBatchesQueueStopwatch.elapsed(TimeUnit.MILLISECONDS);
+
+                                            // Now it must fit, otherwise it means that the buffer is smaller than a single
+                                            // entry, which is an error.
+                                            entrySize = nseBatch.addEntry(path, jsonBytes);
                                         }
-                                        // Write entry to buffer
-                                        nseBatch.addEntry(nse.getPath(), entryData);
+                                        statistics.incrementTotalExtractedEntriesSize(entrySize);
                                     } else {
                                         statistics.incrementEntriesRejected();
                                         if (NodeStateUtils.isHiddenPath(path)) {
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtils.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtils.java
new file mode 100644
index 0000000..85fad87
--- /dev/null
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtils.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
+
+import org.apache.commons.io.FileUtils;
+
+import java.util.Locale;
+
+public class PipelinedUtils {
+    /**
+     * <p>Format a percentage as a string with 2 decimal places. For instance:
+     * <code>formatAsPercentage(52, 1000)</code> returns <code>"5.20"</code>.</p>
+     */
+    public static String formatAsPercentage(long numerator, long denominator) {
+        if (denominator == 0) {
+            return "N/A";
+        } else {
+            return String.format(Locale.ROOT, "%1.2f", (100.0 * numerator) / denominator);
+        }
+    }
+
+    /**
+     * <p>Convert to a percentage as an integer from 0 to 100, with -1 representing an undefined value.</p>
+     */
+    public static int toPercentage(long numerator, long denominator) {
+        if (denominator == 0) {
+            return -1;
+        } else {
+            return (int) Math.round((100.0 * numerator) / denominator);
+        }
+    }
+
+    public static String formatAsTransferSpeedMBs(long numberOfBytes, long timeMillis) {
+        if (timeMillis == 0) {
+            return "N/A";
+        } else {
+            double speed = 1000 * (((double) numberOfBytes) / timeMillis) / FileUtils.ONE_MB;
+            return String.format(Locale.ROOT, "%1.2f MB/s", speed);
+        }
+    }
+
+}
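
A quick usage sketch of these helpers. The class name and input values below are made up for illustration; the expected outputs in the comments follow directly from the formatting code above.

```java
import org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedUtils;

public class PipelinedUtilsExample {
    public static void main(String[] args) {
        // "5.20" - two decimal places, Locale.ROOT formatting
        System.out.println(PipelinedUtils.formatAsPercentage(520, 10_000));
        // "N/A" - a zero denominator is reported as undefined instead of throwing
        System.out.println(PipelinedUtils.formatAsPercentage(520, 0));
        // 5 - rounded integer percentage; -1 when the denominator is 0
        System.out.println(PipelinedUtils.toPercentage(520, 10_000));
        // "32.00 MB/s" - 64 MiB written in 2 seconds
        System.out.println(PipelinedUtils.formatAsTransferSpeedMBs(64L * 1024 * 1024, 2_000));
    }
}
```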
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/TransformStageStatistics.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/TransformStageStatistics.java
index 060c5aa..61cf67e 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/TransformStageStatistics.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/TransformStageStatistics.java
@@ -20,10 +20,15 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.jackrabbit.oak.plugins.index.MetricsFormatter;
+import org.apache.jackrabbit.oak.plugins.index.MetricsUtils;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.atomic.LongAdder;
 
 public class TransformStageStatistics {
+    public static final Logger LOG = LoggerFactory.getLogger(TransformStageStatistics.class);
     private static final int MAX_HISTOGRAM_SIZE = 1000;
     private final LongAdder mongoDocumentsTraversed = new LongAdder();
     private final LongAdder documentsRejectedSplit = new LongAdder();
@@ -50,8 +55,8 @@
         return entriesRejected.sum();
     }
 
-    public LongAdder getDocumentsRejectedSplit() {
-        return documentsRejectedSplit;
+    public long getDocumentsRejectedSplit() {
+        return documentsRejectedSplit.sum();
     }
 
     public BoundedHistogram getHiddenPathsRejectedHistogram() {
@@ -70,7 +75,7 @@
         return emptyNodeStateHistogram;
     }
 
-    public void incrementMongoDocumentsProcessed() {
+    public void incrementMongoDocumentsTraversed() {
         mongoDocumentsTraversed.increment();
     }
 
@@ -128,6 +133,32 @@
                 '}';
     }
 
+    public void publishStatistics(StatisticsProvider statisticsProvider) {
+        LOG.info("Publishing transform stage statistics");
+
+        long mongoDocumentsTraversedSum = mongoDocumentsTraversed.sum();
+        long documentsRejectedSplitSum = documentsRejectedSplit.sum();
+        long documentsRejectedEmptyNodeStateSum = documentsRejectedEmptyNodeState.sum();
+        long documentsRejectedTotal = documentsRejectedSplitSum + documentsRejectedEmptyNodeStateSum;
+        long documentsAcceptedTotal = mongoDocumentsTraversedSum - documentsRejectedTotal;
+        long entriesAcceptedSum = entriesAccepted.sum();
+        long entriesAcceptedTotalSizeSum = entriesAcceptedTotalSize.sum();
+        long entriesRejectedSum = entriesRejected.sum();
+        long entriesTraversed = entriesAcceptedSum + entriesRejectedSum;
+
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_TRAVERSED, mongoDocumentsTraversedSum);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_SPLIT, documentsRejectedSplitSum);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_EMPTY_NODE_STATE, documentsRejectedEmptyNodeStateSum);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_ACCEPTED, documentsAcceptedTotal);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED, documentsRejectedTotal);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_TRAVERSED, entriesTraversed);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_ACCEPTED, entriesAcceptedSum);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED, entriesRejectedSum);
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_HIDDEN_PATHS, entriesRejectedHiddenPaths.sum());
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_PATH_FILTERED, entriesRejectedPathFiltered.sum());
+        MetricsUtils.setCounterOnce(statisticsProvider, PipelinedMetrics.OAK_INDEXER_PIPELINED_EXTRACTED_ENTRIES_TOTAL_SIZE, entriesAcceptedTotalSizeSum);
+    }
+
     public String formatStats() {
         long mongoDocumentsTraversedSum = mongoDocumentsTraversed.sum();
         long entriesAcceptedSum = entriesAccepted.sum();
@@ -138,10 +169,8 @@
         long documentsRejectedTotal = documentsRejectedSplitSum + documentsRejectedEmptyNodeStateSum;
         long documentsAcceptedTotal = mongoDocumentsTraversedSum - documentsRejectedTotal;
         long totalEntries = entriesAcceptedSum + entriesRejectedSum;
-        String documentsAcceptedPercentage = mongoDocumentsTraversedSum == 0 ? "N/A" :
-                String.format("%2.1f%%", (100.0 * documentsAcceptedTotal) / mongoDocumentsTraversedSum);
-        String entriesAcceptedPercentage = totalEntries == 0 ? "N/A" :
-                String.format("%1.1f%%", (100.0 * entriesAcceptedSum) / totalEntries);
+        String documentsAcceptedPercentage = PipelinedUtils.formatAsPercentage(documentsAcceptedTotal, mongoDocumentsTraversedSum);
+        String entriesAcceptedPercentage = PipelinedUtils.formatAsPercentage(entriesAcceptedSum, totalEntries);
         long avgEntrySize = entriesAcceptedSum == 0 ? -1 :
                 extractedEntriesTotalSizeSum / entriesAcceptedSum;
         return MetricsFormatter.newBuilder()
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/MergeIncrementalFlatFileStore.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/MergeIncrementalFlatFileStore.java
index b6cc8ec..485105f 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/MergeIncrementalFlatFileStore.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/MergeIncrementalFlatFileStore.java
@@ -96,6 +96,13 @@
         }
     }
 
+    /**
+     * Merges multiple index store files.
+     * <p>
+     * This method is somewhat verbose, but that is acceptable: the checkpoint diff does not always
+     * produce consistent data, so the different cases have to be handled explicitly.
+     */
     private void mergeIndexStoreFiles() throws IOException {
         Map<String, IncrementalStoreOperand> enumMap = Arrays.stream(IncrementalStoreOperand.values())
                 .collect(Collectors.toUnmodifiableMap(IncrementalStoreOperand::toString, k -> IncrementalStoreOperand.valueOf(k.name())));
@@ -113,18 +120,24 @@
                     compared = comparator.compare(new SimpleNodeStateHolder(baseFFSLine), new SimpleNodeStateHolder(incrementalFFSLine));
                     if (compared < 0) { // write baseFFSLine in merged file and advance line in baseFFS
                         baseFFSLine = writeAndAdvance(writer, baseFFSBufferedReader, baseFFSLine);
-                    } else if (compared > 0) { // write incrementalFFSline and advance line in incrementalFFS
-                        String[] incrementalFFSParts = IncrementalFlatFileStoreNodeStateEntryWriter.getParts(incrementalFFSLine);
-                        if (!IncrementalStoreOperand.ADD.toString().equals(getOperand(incrementalFFSParts))) {
-                            log.warn("Expected operand {} but got {} for incremental line {}. Merging will proceed as usual, but this needs to be looked into.",
-                                    IncrementalStoreOperand.ADD, getOperand(incrementalFFSParts), incrementalFFSLine);
-                        }
-                        incrementalFFSLine = writeAndAdvance(writer, incrementalFFSBufferedReader,
-                                getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
+                    }
+                    // We log warnings instead of failing with checkState because the checkpoint diff can be
+                    // inconsistent. For example, deleting a node and then adding a node at the same path is
+                    // dumped in the incremental FFS as a node addition instead of a modification.
+                    else if (compared > 0) { // write incrementalFFSline and advance line in incrementalFFS
+                        incrementalFFSLine = processIncrementalFFSLine(enumMap, writer, incrementalFFSBufferedReader, incrementalFFSLine);
                     } else {
                         String[] incrementalFFSParts = IncrementalFlatFileStoreNodeStateEntryWriter.getParts(incrementalFFSLine);
                         String operand = getOperand(incrementalFFSParts);
                         switch (enumMap.get(operand)) {
+                            case ADD:
+                                log.warn("Expected operand {} or {} but got {} for incremental line {}. " +
+                                                "Merging will proceed, but this is unexpected.",
+                                        IncrementalStoreOperand.MODIFY, IncrementalStoreOperand.DELETE, getOperand(incrementalFFSParts), incrementalFFSLine);
+                                incrementalFFSLine = writeAndAdvance(writer, incrementalFFSBufferedReader,
+                                        getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
+                                break;
                             case MODIFY:
                                 incrementalFFSLine = writeAndAdvance(writer, incrementalFFSBufferedReader,
                                         getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
@@ -149,6 +162,40 @@
         }
     }
 
+    private String processIncrementalFFSLine(Map<String, IncrementalStoreOperand> enumMap, BufferedWriter writer,
+                                             BufferedReader incrementalFFSBufferedReader, String incrementalFFSLine) throws IOException {
+        String[] incrementalFFSParts = IncrementalFlatFileStoreNodeStateEntryWriter.getParts(incrementalFFSLine);
+        String operand = getOperand(incrementalFFSParts);
+        switch (enumMap.get(operand)) {
+            case ADD:
+                incrementalFFSLine = writeAndAdvance(writer, incrementalFFSBufferedReader,
+                        getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
+                break;
+            case MODIFY:
+                // This case should not happen. If it does, we treat the modification as the addition of a node:
+                // the node is not present in the base FFS, but the checkpoint diff reported it as modified
+                // instead of added.
+                log.warn("Expected operand {} but got {} for incremental line {}. " +
+                                "Merging will proceed, but this is unexpected.",
+                        IncrementalStoreOperand.ADD, operand, incrementalFFSLine);
+                incrementalFFSLine = writeAndAdvance(writer, incrementalFFSBufferedReader,
+                        getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
+                break;
+            case DELETE:
+                // This case should not happen. If it does, it means the node does not exist in the base FFS,
+                // yet the incremental FFS reports the deletion of this already non-existing node.
+                // In this case we simply skip the line.
+                log.warn("Expected operand {} but got {} for incremental line {}. Merging will proceed as usual, but this needs to be looked into.",
+                        IncrementalStoreOperand.ADD, operand, incrementalFFSLine);
+                incrementalFFSLine = incrementalFFSBufferedReader.readLine();
+                break;
+            default:
+                log.error("Wrong operand in incremental ffs: operand:{}, line:{}", operand, incrementalFFSLine);
+                throw new RuntimeException("wrong operand in incremental ffs: operand:" + operand + ", line:" + incrementalFFSLine);
+        }
+        return incrementalFFSLine;
+    }
+
     private IndexStoreMetadata getIndexStoreMetadataForMergedFile() throws IOException {
         File baseFFSMetadataFile = IndexStoreUtils.getMetadataFile(baseFFS, algorithm);
         File incrementalMetadataFile = IndexStoreUtils.getMetadataFile(incrementalFFS, algorithm);
@@ -202,13 +249,10 @@
     }
 
     private String writeRestOfIncrementalFileAndAdvance(BufferedWriter writer, BufferedReader bufferedReader, String incrementalFFSLine) throws IOException {
+        Map<String, IncrementalStoreOperand> enumMap = Arrays.stream(IncrementalStoreOperand.values())
+                .collect(Collectors.toUnmodifiableMap(IncrementalStoreOperand::toString, k -> IncrementalStoreOperand.valueOf(k.name())));
         do {
-            String[] incrementalFFSParts = IncrementalFlatFileStoreNodeStateEntryWriter.getParts(incrementalFFSLine);
-            String operand = getOperand(incrementalFFSParts);
-            checkState(!IncrementalStoreOperand.MODIFY.toString().equals(operand)
-                            && !IncrementalStoreOperand.DELETE.toString().equals(operand),
-                    "incremental ffs should not have modify or delete operands: {}", incrementalFFSLine);
-            incrementalFFSLine = writeAndAdvance(writer, bufferedReader, getFFSLineFromIncrementalFFSParts(incrementalFFSParts));
+            incrementalFFSLine = processIncrementalFFSLine(enumMap, writer, bufferedReader, incrementalFFSLine);
         } while (incrementalFFSLine != null);
         return bufferedReader.readLine();
     }
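
To make the merge rules above concrete, consider a hypothetical base FFS containing /a, /b and /c, and an incremental FFS containing /b with operand MODIFY, /d with operand ADD and /e with operand DELETE, where /d and /e do not exist in the base. The merged store then contains /a and /c copied from the base, /b taken from the incremental file (MODIFY on a matching path wins), and /d taken from the incremental file; the DELETE for /e is skipped with a warning, since there is no corresponding node in the base to remove.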
diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/indexstore/IndexStoreUtils.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/indexstore/IndexStoreUtils.java
index b901621..cdb3c1f 100644
--- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/indexstore/IndexStoreUtils.java
+++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/index/indexer/document/indexstore/IndexStoreUtils.java
@@ -88,14 +88,12 @@
         return new BufferedWriter(new OutputStreamWriter(algorithm.getOutputStream(out)));
     }
 
-    public static BufferedOutputStream createOutputStream(File file, Compression algorithm) throws IOException {
-        OutputStream out = new FileOutputStream(file);
-        return new BufferedOutputStream(algorithm.getOutputStream(out));
-    }
-
-    public static BufferedOutputStream createOutputStream(Path file, Compression algorithm) throws IOException {
-        OutputStream out = Files.newOutputStream(file);
-        return new BufferedOutputStream(algorithm.getOutputStream(out));
+    public static OutputStream createOutputStream(Path file, Compression algorithm) throws IOException {
+        // The output streams created by LZ4 and GZIP buffer their input, so we should not wrap them again.
+        // However, the implementation of the compression streams may make small writes to the underlying stream,
+        // so we buffer the underlying file stream.
+        OutputStream out = new BufferedOutputStream(Files.newOutputStream(file));
+        return algorithm.getOutputStream(out);
     }
 
     public static long sizeOf(List<File> sortedFiles) {
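
A small usage sketch of the revised helper. The file name and payload are illustrative; Compression.GZIP is assumed to be the existing constant from oak-commons.

```java
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.jackrabbit.oak.commons.Compression;
import org.apache.jackrabbit.oak.index.indexer.document.indexstore.IndexStoreUtils;

public class CreateOutputStreamExample {
    public static void main(String[] args) throws Exception {
        Path file = Files.createTempFile("ffs-example", ".gz");
        // The returned stream is the compression stream wrapped around a BufferedOutputStream,
        // so the compressed output is not buffered twice.
        try (OutputStream os = IndexStoreUtils.createOutputStream(file, Compression.GZIP)) {
            os.write("/content/example|{\"jcr:primaryType\":\"nt:unstructured\"}\n"
                    .getBytes(StandardCharsets.UTF_8));
        }
    }
}
```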
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatchTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatchTest.java
index 32cae0a..4d7857c 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatchTest.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/NodeStateEntryBatchTest.java
@@ -20,8 +20,8 @@
 
 import org.junit.Test;
 
-import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -40,43 +40,52 @@
         batch.addEntry("b", new byte[1]);
         assertEquals(2, batch.numberOfEntries());
         assertTrue(batch.isAtMaxEntries());
-        assertThrows(IllegalStateException.class, () -> batch.addEntry("c", new byte[1]));
+        assertThrows(NodeStateEntryBatch.BufferFullException.class, () -> batch.addEntry("c", new byte[1]));
     }
 
     @Test
     public void testMaximumBufferSize() {
         NodeStateEntryBatch batch = NodeStateEntryBatch.createNodeStateEntryBatch(NodeStateEntryBatch.MIN_BUFFER_SIZE, 10);
-        assertTrue(batch.hasSpaceForEntry(new byte[NodeStateEntryBatch.MIN_BUFFER_SIZE -4])); // Needs 4 bytes for the length
-        assertFalse(batch.hasSpaceForEntry(new byte[NodeStateEntryBatch.MIN_BUFFER_SIZE]));
+        int keySize = "a".getBytes(StandardCharsets.UTF_8).length;
+        batch.addEntry("a", new byte[NodeStateEntryBatch.MIN_BUFFER_SIZE - 4 - 4 - keySize]); // Needs 4 bytes for the length of the path and of the entry data
+        assertThrows(NodeStateEntryBatch.BufferFullException.class, () -> batch.addEntry("b", new byte[NodeStateEntryBatch.MIN_BUFFER_SIZE]));
 
-        batch.addEntry("a", new byte[NodeStateEntryBatch.MIN_BUFFER_SIZE -4]);
-        assertEquals(NodeStateEntryBatch.MIN_BUFFER_SIZE, batch.sizeOfEntries());
+        assertEquals(NodeStateEntryBatch.MIN_BUFFER_SIZE, batch.sizeOfEntriesBytes());
         assertEquals(1, batch.numberOfEntries());
-        assertFalse(batch.hasSpaceForEntry(new byte[1]));
-        assertThrows(BufferOverflowException.class, () -> batch.addEntry("b", new byte[1]));
+        assertThrows(NodeStateEntryBatch.BufferFullException.class, () -> batch.addEntry("b", new byte[1]));
     }
 
     @Test
     public void flipAndResetBuffer() {
-        int sizeOfEntry = NodeStateEntryBatch.MIN_BUFFER_SIZE-4;
         NodeStateEntryBatch batch = NodeStateEntryBatch.createNodeStateEntryBatch(NodeStateEntryBatch.MIN_BUFFER_SIZE, 10);
-        byte[] testArray = new byte[sizeOfEntry];
-        for (int i = 0; i < sizeOfEntry; i++) {
-            testArray[i] = (byte) (i % 127);
+        int expectedBytesWrittenToBuffer = NodeStateEntryBatch.MIN_BUFFER_SIZE;
+
+        String key = "a";
+        int keyLength = "a".getBytes(StandardCharsets.UTF_8).length;
+        byte[] jsonNodeBytes = new byte[batch.capacity()-4-4-keyLength];
+        for (int i = 0; i < jsonNodeBytes.length; i++) {
+            jsonNodeBytes[i] = (byte) (i % 127);
         }
-        batch.addEntry("a", testArray);
-        assertEquals(batch.getBuffer().position(), sizeOfEntry + 4);
+
+        int bytesWrittenToBuffer = batch.addEntry(key, jsonNodeBytes);
+        assertEquals(bytesWrittenToBuffer, expectedBytesWrittenToBuffer);
+        assertEquals(batch.getBuffer().position(), expectedBytesWrittenToBuffer);
 
         batch.flip();
 
         ByteBuffer buffer = batch.getBuffer();
         assertEquals(buffer.position(), 0);
-        assertEquals(buffer.remaining(), sizeOfEntry + 4);
-        assertEquals(sizeOfEntry, buffer.getInt());
-        byte[] entryData = new byte[sizeOfEntry];
-        buffer.get(entryData);
-        assertEquals(buffer.position(), sizeOfEntry + 4);
-        assertArrayEquals(testArray, entryData);
+        assertEquals(buffer.remaining(), expectedBytesWrittenToBuffer);
+        assertEquals(keyLength, buffer.getInt());
+        byte[] keyBytes = new byte[keyLength];
+        buffer.get(keyBytes);
+        assertEquals(key, new String(keyBytes, StandardCharsets.UTF_8));
+
+        int jsonLength = buffer.getInt();
+        byte[] jsonBytes = new byte[jsonLength];
+        buffer.get(jsonBytes);
+        assertEquals(buffer.position(), expectedBytesWrittenToBuffer);
+        assertArrayEquals(jsonNodeBytes, jsonBytes);
 
         batch.reset();
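The reworked assertions above decode each entry as [key length][key bytes][data length][data bytes]. A minimal sketch of that layout against a plain ByteBuffer follows; the helper name is illustrative, and the layout is inferred from the test rather than taken from the NodeStateEntryBatch source.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class EntryLayoutSketch {
    static int putEntry(ByteBuffer buffer, String path, byte[] data) {
        byte[] key = path.getBytes(StandardCharsets.UTF_8);
        int bytesWritten = 4 + key.length + 4 + data.length;
        buffer.putInt(key.length);   // length of the path
        buffer.put(key);             // path bytes
        buffer.putInt(data.length);  // length of the serialized node state
        buffer.put(data);            // node state bytes
        return bytesWritten;         // corresponds to the value asserted in flipAndResetBuffer
    }
}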
 
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedIT.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedIT.java
index e1356a6..c771dc4 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedIT.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedIT.java
@@ -32,6 +32,7 @@
 import org.apache.jackrabbit.oak.plugins.document.RevisionVector;
 import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
+import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
 import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
@@ -40,6 +41,7 @@
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -48,6 +50,8 @@
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.RestoreSystemProperties;
 import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -57,8 +61,12 @@
 import java.util.Comparator;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
+import static java.lang.management.ManagementFactory.getPlatformMBeanServer;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMongoDownloadTask.OAK_INDEXER_PIPELINED_MONGO_REGEX_PATH_FILTERING;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMongoDownloadTask.OAK_INDEXER_PIPELINED_RETRY_ON_CONNECTION_ERRORS;
 import static org.junit.Assert.assertArrayEquals;
@@ -67,10 +75,12 @@
 import static org.junit.Assert.assertTrue;
 
 public class PipelinedIT {
+    private static final Logger LOG = LoggerFactory.getLogger(PipelinedIT.class);
     private static final PathFilter contentDamPathFilter = new PathFilter(List.of("/content/dam"), List.of());
     private static final int LONG_PATH_TEST_LEVELS = 30;
     private static final String LONG_PATH_LEVEL_STRING = "Z12345678901234567890-Level_";
 
+    private static ScheduledExecutorService executorService;
     @Rule
     public final MongoConnectionFactory connectionFactory = new MongoConnectionFactory();
     @Rule
@@ -80,6 +90,9 @@
     @Rule
     public final TemporaryFolder sortFolder = new TemporaryFolder();
 
+
+    private MetricStatisticsProvider statsProvider;
+
     @BeforeClass
     public static void setup() throws IOException {
         Assume.assumeTrue(MongoUtils.isAvailable());
@@ -89,14 +102,33 @@
             path.append("/").append(LONG_PATH_LEVEL_STRING).append(i);
             EXPECTED_FFS.add(path + "|{}");
         }
+        executorService = Executors.newSingleThreadScheduledExecutor();
     }
 
-    @After @Before
+    @AfterClass
+    public static void teardown() {
+        if (executorService != null) {
+            executorService.shutdown();
+        }
+    }
+
+    @Before
+    public void before() {
+        MongoConnection c = connectionFactory.getConnection();
+        if (c != null) {
+            c.getDatabase().drop();
+        }
+        statsProvider = new MetricStatisticsProvider(getPlatformMBeanServer(), executorService);
+    }
+
+    @After
     public void tear() {
         MongoConnection c = connectionFactory.getConnection();
         if (c != null) {
             c.getDatabase().drop();
         }
+        statsProvider.close();
+        statsProvider = null;
     }
 
     @Test
@@ -188,6 +220,39 @@
         File file = pipelinedStrategy.createSortedStoreFile();
         assertTrue(file.exists());
         assertEquals(expected, Files.readAllLines(file.toPath()));
+        assertMetrics();
+    }
+
+    private void assertMetrics() {
+        // Check the statistics
+        Set<String> metricsNames = statsProvider.getRegistry().getCounters().keySet();
+
+        assertEquals(Set.of(
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_DOWNLOADED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_TRAVERSED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_SPLIT,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_ACCEPTED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_REJECTED_EMPTY_NODE_STATE,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_TRAVERSED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_ACCEPTED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_HIDDEN_PATHS,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_ENTRIES_REJECTED_PATH_FILTERED,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_EXTRACTED_ENTRIES_TOTAL_SIZE,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_MONGO_DOWNLOAD_ENQUEUE_DELAY_PERCENTAGE,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT,
+                PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME
+        ), metricsNames);
+
+        String pipelinedMetrics = statsProvider.getRegistry()
+                .getCounters()
+                .entrySet().stream()
+                .map(e -> e.getKey() + " " + e.getValue().getCount())
+                .collect(Collectors.joining("\n"));
+        LOG.info("Metrics\n{}", pipelinedMetrics);
     }
 
     @Test
@@ -281,6 +346,7 @@
         File file = pipelinedStrategy.createSortedStoreFile();
         assertTrue(file.exists());
         assertArrayEquals(expected.toArray(new String[0]), Files.readAllLines(file.toPath()).toArray(new String[0]));
+        assertMetrics();
     }
 
 
@@ -317,7 +383,8 @@
                 Compression.NONE,
                 pathPredicate,
                 pathFilters,
-                null);
+                null,
+                statsProvider);
     }
 
     private void createContent(NodeStore rwNodeStore) throws CommitFailedException {
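assertMetrics above only compares the set of registered counter names and logs their values. If individual counter values also need to be asserted, a small helper along these lines could be added; it is hypothetical and not part of this change, but it relies only on the registry accessors already exercised in these tests.

import com.codahale.metrics.Counter;
import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;

import static org.junit.Assert.assertNotNull;

final class MetricAssertions {
    // Returns the current value of a registered counter, failing if it is missing.
    static long counterValue(MetricStatisticsProvider statsProvider, String name) {
        Counter counter = statsProvider.getRegistry().getCounters().get(name);
        assertNotNull("Counter not registered: " + name, counter);
        return counter.getCount();
    }
}

For example, a test could check that the value returned for PipelinedMetrics.OAK_INDEXER_PIPELINED_DOCUMENTS_DOWNLOADED is greater than zero after a successful run, assuming the test content yields at least one document.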
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTest.java
index 1eda6af..1dbc106 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTest.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTest.java
@@ -19,6 +19,9 @@
 package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
 
 import org.apache.jackrabbit.oak.commons.Compression;
+import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.BufferedWriter;
@@ -29,10 +32,17 @@
 import java.nio.file.Paths;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedMetrics.OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedStrategy.FLATFILESTORE_CHARSET;
 import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedStrategy.SENTINEL_SORTED_FILES_QUEUE;
 import static org.junit.Assert.assertArrayEquals;
@@ -40,8 +50,19 @@
 import static org.junit.Assert.assertTrue;
 
 public class PipelinedMergeSortTaskTest extends PipelinedMergeSortTaskTestBase {
-    private final ClassLoader classLoader = getClass().getClassLoader();
-    private final Compression algorithm = Compression.NONE;
+    private static ScheduledExecutorService metricsExecutor;
+    private static final ClassLoader classLoader = PipelinedMergeSortTaskTest.class.getClassLoader();
+    private static final Compression algorithm = Compression.NONE;
+
+    @BeforeClass
+    public static void init() {
+        metricsExecutor = Executors.newSingleThreadScheduledExecutor();
+    }
+
+    @AfterClass
+    public static void shutdown() {
+        metricsExecutor.shutdown();
+    }
 
     @Test
     public void noFileToMerge() throws Exception {
@@ -89,27 +110,40 @@
         Path sortRoot = sortFolder.getRoot().toPath();
         // +1 for the Sentinel.
         ArrayBlockingQueue<Path> sortedFilesQueue = new ArrayBlockingQueue<>(files.length + 1);
-        PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(sortRoot, pathComparator, algorithm, sortedFilesQueue);
-        // Enqueue all the files that are to be merged
-        for (Path file : files) {
-            // The intermediate files are deleted after being merged, so we should copy them to the temporary sort root folder
-            Path workDirCopy = Files.copy(file, sortRoot.resolve(file.getFileName()));
-            sortedFilesQueue.put(workDirCopy);
-        }
-        // Signal end of files to merge
-        sortedFilesQueue.put(SENTINEL_SORTED_FILES_QUEUE);
-        // Run the merge task
-        PipelinedMergeSortTask.Result result = mergeSortTask.call();
+        try (MetricStatisticsProvider metricStatisticsProvider = new MetricStatisticsProvider(null, metricsExecutor)) {
+            PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(sortRoot,
+                    pathComparator,
+                    algorithm,
+                    sortedFilesQueue,
+                    metricStatisticsProvider);
+            // Enqueue all the files that are to be merged
+            for (Path file : files) {
+                // The intermediate files are deleted after being merged, so we should copy them to the temporary sort root folder
+                Path workDirCopy = Files.copy(file, sortRoot.resolve(file.getFileName()));
+                sortedFilesQueue.put(workDirCopy);
+            }
+            // Signal end of files to merge
+            sortedFilesQueue.put(SENTINEL_SORTED_FILES_QUEUE);
+            // Run the merge task
+            PipelinedMergeSortTask.Result result = mergeSortTask.call();
 
-        try (Stream<Path> fileStream = Files.list(sortRoot)) {
-            List<String> filesInWorkDir = fileStream
-                    .map(path -> path.getFileName().toString())
-                    .collect(Collectors.toList());
-            assertEquals("The sort work directory should contain only the flat file store, the intermediate files should have been deleted after merged. Instead it contains: " + filesInWorkDir,
-                    1, filesInWorkDir.size());
+            try (Stream<Path> fileStream = Files.list(sortRoot)) {
+                List<String> filesInWorkDir = fileStream
+                        .map(path -> path.getFileName().toString())
+                        .collect(Collectors.toList());
+                assertEquals("The sort work directory should contain only the flat file store, the intermediate files should have been deleted after merged. Instead it contains: " + filesInWorkDir,
+                        1, filesInWorkDir.size());
+            }
+            assertTrue(Files.exists(result.getFlatFileStoreFile()));
+            Set<String> metricNames = metricStatisticsProvider.getRegistry().getCounters().keySet();
+            assertEquals(metricNames, Set.of(
+                    OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_TIME,
+                    OAK_INDEXER_PIPELINED_MERGE_SORT_INTERMEDIATE_FILES_COUNT,
+                    OAK_INDEXER_PIPELINED_MERGE_SORT_EAGER_MERGES_RUNS,
+                    OAK_INDEXER_PIPELINED_MERGE_SORT_FINAL_MERGE_FILES_COUNT
+            ));
+            return result;
         }
-        assertTrue(Files.exists(result.getFlatFileStoreFile()));
-        return result;
     }
 
     @Test(expected = IllegalStateException.class)
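The comments in the test above describe the queue protocol used by the merge task: intermediate files are enqueued, followed by a sentinel that signals the end of input. A generic sketch of that sentinel-terminated consumption loop is shown below, with illustrative names rather than the Oak API.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;

public class SentinelQueueSketch {
    static final Path SENTINEL = Paths.get("");  // stand-in for SENTINEL_SORTED_FILES_QUEUE

    static List<Path> drainUntilSentinel(ArrayBlockingQueue<Path> queue) throws InterruptedException {
        List<Path> files = new ArrayList<>();
        while (true) {
            Path next = queue.take();   // blocks until a file (or the sentinel) arrives
            if (next == SENTINEL) {     // identity check: the sentinel is a single well-known instance
                return files;           // no more intermediate files will be produced
            }
            files.add(next);            // in the real task, this is where merging would happen
        }
    }
}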
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTestBase.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTestBase.java
index b135ccf..eada531 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTestBase.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMergeSortTaskTestBase.java
@@ -19,6 +19,7 @@
 package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
 
 import org.apache.jackrabbit.oak.commons.Compression;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.junit.Rule;
 import org.junit.contrib.java.lang.system.RestoreSystemProperties;
 import org.junit.rules.TemporaryFolder;
@@ -56,11 +57,10 @@
     protected List<NodeStateHolder> sortAsNodeStateEntries(List<String> ffsLines) {
         Comparator<NodeStateHolder> comparatorBinary = (e1, e2) -> pathComparator.compare(e1.getPathElements(), e2.getPathElements());
         NodeStateHolderFactory nodeFactory = new NodeStateHolderFactory();
-        List<NodeStateHolder> nodesOrdered = ffsLines.stream()
+        return ffsLines.stream()
                 .map(ffsLine -> nodeFactory.apply(ffsLine.getBytes(FLATFILESTORE_CHARSET)))
                 .sorted(comparatorBinary)
                 .collect(Collectors.toList());
-        return nodesOrdered;
     }
 
 
@@ -108,7 +108,12 @@
         Path sortRoot = sortFolder.getRoot().toPath();
         // +1 for the Sentinel.
         ArrayBlockingQueue<Path> sortedFilesQueue = new ArrayBlockingQueue<>(files.length + 1);
-        PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(sortRoot, pathComparator, algorithm, sortedFilesQueue);
+        PipelinedMergeSortTask mergeSortTask = new PipelinedMergeSortTask(
+                sortRoot,
+                pathComparator,
+                algorithm,
+                sortedFilesQueue,
+                StatisticsProvider.NOOP);
         // Enqueue all the files that are to be merged
         for (Path file : files) {
             sortedFilesQueue.put(file);
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTaskTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTaskTest.java
index f1d082b..82e9f6d 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTaskTest.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedMongoDownloadTaskTest.java
@@ -28,6 +28,7 @@
 import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.NodeDocument;
 import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
+import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
 import org.apache.jackrabbit.oak.spi.filter.PathFilter;
 import org.bson.BsonDocument;
 import org.bson.conversions.Bson;
@@ -36,8 +37,11 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
@@ -100,17 +104,29 @@
         BlockingQueue<NodeDocument[]> queue = new ArrayBlockingQueue<>(100);
         MongoDocumentStore mongoDocumentStore = mock(MongoDocumentStore.class);
 
-        PipelinedMongoDownloadTask task = new PipelinedMongoDownloadTask(mongoDatabase, mongoDocumentStore, batchMaxMemorySize, batchMaxElements, queue, null);
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+        try {
+            try (MetricStatisticsProvider metricStatisticsProvider = new MetricStatisticsProvider(null, executor)) {
+                PipelinedMongoDownloadTask task = new PipelinedMongoDownloadTask(mongoDatabase, mongoDocumentStore,
+                        batchMaxMemorySize, batchMaxElements, queue, null,
+                        metricStatisticsProvider);
 
-        // Execute
-        PipelinedMongoDownloadTask.Result result = task.call();
+                // Execute
+                PipelinedMongoDownloadTask.Result result = task.call();
 
-        // Verify results
-        assertEquals(documents.size(), result.getDocumentsDownloaded());
-        ArrayList<NodeDocument[]> c = new ArrayList<>();
-        queue.drainTo(c);
-        List<NodeDocument> actualDocuments = c.stream().flatMap(Arrays::stream).collect(Collectors.toList());
-        assertEquals(documents, actualDocuments);
+                // Verify results
+                assertEquals(documents.size(), result.getDocumentsDownloaded());
+                ArrayList<NodeDocument[]> c = new ArrayList<>();
+                queue.drainTo(c);
+                List<NodeDocument> actualDocuments = c.stream().flatMap(Arrays::stream).collect(Collectors.toList());
+                assertEquals(documents, actualDocuments);
+
+                Set<String> metricNames = metricStatisticsProvider.getRegistry().getCounters().keySet();
+                assertEquals(metricNames, Set.of(PipelinedMetrics.OAK_INDEXER_PIPELINED_MONGO_DOWNLOAD_ENQUEUE_DELAY_PERCENTAGE));
+            }
+        } finally {
+            executor.shutdown();
+        }
 
         verify(dbCollection).find(BsonDocument.parse("{\"_modified\": {\"$gte\": 0}}"));
         verify(dbCollection).find(BsonDocument.parse("{\"_modified\": {\"$gte\": 123000, \"$lt\": 123001}, \"_id\": {\"$gt\": \"3:/content/dam/asset1\"}}"));
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTaskTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTaskTest.java
index 892b7a3..2e88536 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTaskTest.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedSortBatchTaskTest.java
@@ -19,18 +19,13 @@
 package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
 
 import org.apache.jackrabbit.oak.commons.Compression;
-import org.apache.jackrabbit.oak.index.indexer.document.flatfile.NodeStateEntryWriter;
-import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Set;
@@ -65,7 +60,6 @@
 
     private final PathElementComparator pathComparator = new PathElementComparator(Set.of());
     private final Compression algorithm = Compression.NONE;
-    private final NodeStateEntryWriter nodeStateEntryWriter = new NodeStateEntryWriter(new MemoryBlobStore());
 
     @Test
     public void noBatch() throws Exception {
@@ -81,6 +75,7 @@
     @Test
     public void emptyBatch() throws Exception {
         NodeStateEntryBatch batch = NodeStateEntryBatch.createNodeStateEntryBatch(NodeStateEntryBatch.MIN_BUFFER_SIZE, 10);
+        batch.flip();
 
         TestResult testResult = runTest(batch);
 
@@ -99,6 +94,7 @@
         addEntry(batch, "/a0/b1", "{\"key\":5}");
         addEntry(batch, "/a0/b0/c1", "{\"key\":4}");
         addEntry(batch, "/a0/b0/c0", "{\"key\":3}");
+        batch.flip();
 
         TestResult testResult = runTest(batch);
 
@@ -125,11 +121,13 @@
         addEntry(batch1, "/a0/b0", "{\"key\":2}");
         addEntry(batch1, "/a0", "{\"key\":1}");
         addEntry(batch1, "/a1/b0", "{\"key\":6}");
+        batch1.flip();
 
         NodeStateEntryBatch batch2 = NodeStateEntryBatch.createNodeStateEntryBatch(NodeStateEntryBatch.MIN_BUFFER_SIZE, 10);
         addEntry(batch2, "/a0/b1", "{\"key\":5}");
         addEntry(batch2, "/a0/b0/c1", "{\"key\":4}");
         addEntry(batch2, "/a0/b0/c0", "{\"key\":3}");
+        batch2.flip();
 
         TestResult testResult = runTest(batch1, batch2);
 
@@ -154,12 +152,8 @@
         );
     }
 
-    private void addEntry(NodeStateEntryBatch batch, String path, String entry) throws IOException {
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        Writer writer = new OutputStreamWriter(baos);
-        nodeStateEntryWriter.writeTo(writer, path, entry);
-        writer.close();
-        batch.addEntry(path, baos.toByteArray());
+    private void addEntry(NodeStateEntryBatch batch, String path, String entry) {
+        batch.addEntry(path, entry.getBytes(StandardCharsets.UTF_8));
     }
 
     private TestResult runTest(NodeStateEntryBatch... nodeStateEntryBatches) throws Exception {
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtilsTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtilsTest.java
new file mode 100644
index 0000000..6749f0e
--- /dev/null
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/flatfile/pipelined/PipelinedUtilsTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined;
+
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedUtils.formatAsPercentage;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedUtils.formatAsTransferSpeedMBs;
+import static org.apache.jackrabbit.oak.index.indexer.document.flatfile.pipelined.PipelinedUtils.toPercentage;
+import static org.junit.Assert.assertEquals;
+
+public class PipelinedUtilsTest {
+    @Test
+    public void testFormatAsPercentage() {
+        assertEquals("0.00", formatAsPercentage(0, 100));
+        assertEquals("1.00", formatAsPercentage(1, 100));
+        assertEquals("0.10", formatAsPercentage(1, 1000));
+        assertEquals("0.01", formatAsPercentage(1, 10_000));
+        assertEquals("N/A", formatAsPercentage(1, 0));
+        assertEquals("100.00", formatAsPercentage(100, 100));
+        assertEquals("120.00", formatAsPercentage(120, 100));
+        assertEquals("314.16", formatAsPercentage(355, 113));
+    }
+
+    @Test
+    public void testFormatAsTransferSpeedMBs() {
+        assertEquals("0.95 MB/s", formatAsTransferSpeedMBs(1_000_000, TimeUnit.SECONDS.toMillis(1)));
+        assertEquals("0.00 MB/s", formatAsTransferSpeedMBs(0, TimeUnit.SECONDS.toMillis(1)));
+        assertEquals("0.00 MB/s", formatAsTransferSpeedMBs(1, TimeUnit.SECONDS.toMillis(1)));
+        assertEquals("N/A", formatAsTransferSpeedMBs(1_000_000, TimeUnit.SECONDS.toMillis(0)));
+        assertEquals("8796093022208.00 MB/s", formatAsTransferSpeedMBs(Long.MAX_VALUE, TimeUnit.SECONDS.toMillis(1)));
+        assertEquals("-8796093022208.00 MB/s", formatAsTransferSpeedMBs(Long.MIN_VALUE, TimeUnit.SECONDS.toMillis(1)));
+        assertEquals("0.00 MB/s", formatAsTransferSpeedMBs(1_000_000, Long.MAX_VALUE));
+        assertEquals("-0.00 MB/s", formatAsTransferSpeedMBs(1_000_000, Long.MIN_VALUE));
+    }
+
+    @Test
+    public void testToPercentage() {
+        assertEquals(0, toPercentage(0, 100));
+        assertEquals(1, toPercentage(1, 100));
+        assertEquals(0, toPercentage(1, 1000));
+        assertEquals(0, toPercentage(1, 10_000));
+        assertEquals(-1, toPercentage(1, 0));
+        assertEquals(100, toPercentage(100, 100));
+        assertEquals(120, toPercentage(120, 100));
+        assertEquals(314, toPercentage(355, 113));
+    }
+}
\ No newline at end of file
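The expected values in these tests are consistent with formulas along the following lines; this is an inference from the assertions, not the actual PipelinedUtils implementation, which may differ in rounding or locale handling.

import java.util.Locale;

public class PipelinedUtilsSketch {
    static String formatAsPercentage(long numerator, long denominator) {
        if (denominator == 0) {
            return "N/A";
        }
        return String.format(Locale.ROOT, "%.2f", (100.0 * numerator) / denominator);
    }

    static int toPercentage(long numerator, long denominator) {
        if (denominator == 0) {
            return -1;  // sentinel for an undefined percentage
        }
        return (int) ((100 * numerator) / denominator);
    }

    static String formatAsTransferSpeedMBs(long bytes, long millis) {
        if (millis == 0) {
            return "N/A";
        }
        double megabytes = bytes / (1024.0 * 1024.0);  // 1 MB treated as 2^20 bytes
        double seconds = millis / 1000.0;
        return String.format(Locale.ROOT, "%.2f MB/s", megabytes / seconds);
    }
}

For example, formatAsTransferSpeedMBs(1_000_000, 1000) evaluates to 1_000_000 / 2^20 over one second, roughly 0.95 MB/s, matching the first assertion of testFormatAsTransferSpeedMBs.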
diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/IncrementalStoreIT.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/IncrementalStoreIT.java
index a6902e5..65efaf0 100644
--- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/IncrementalStoreIT.java
+++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/incrementalstore/IncrementalStoreIT.java
@@ -49,6 +49,7 @@
 import org.apache.jackrabbit.oak.spi.filter.PathFilter;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assume;
@@ -309,7 +310,8 @@
                 algorithm,
                 pathPredicate,
                 pathFilters,
-                checkpoint);
+                checkpoint,
+                StatisticsProvider.NOOP);
     }
 
     private IncrementalFlatFileStoreStrategy createIncrementalStrategy(Backend backend,
diff --git a/oak-run-elastic/pom.xml b/oak-run-elastic/pom.xml
index e7e12b7..316b9b4 100644
--- a/oak-run-elastic/pom.xml
+++ b/oak-run-elastic/pom.xml
@@ -37,8 +37,9 @@
         105 MB : Setting constraint to default oak-run jar post adding the build plugin to rename the fat jar with embedded dependencies as the default jar.
         121 MB : add Elasticsearch Java client along with RHLC: the latter can be removed when the code can be fully migrated to use the new client
         125 MB : shaded Guava
+        85 MB : remove Elasticsearch RHLC
         -->
-        <max.jar.size>125000000</max.jar.size>
+        <max.jar.size>85000000</max.jar.size>
                       
     </properties>
 
diff --git a/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/ElasticIndexerTest.java b/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/ElasticIndexerTest.java
index 2b5270b..e447aa8 100644
--- a/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/ElasticIndexerTest.java
+++ b/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/index/indexer/document/ElasticIndexerTest.java
@@ -18,6 +18,10 @@
  */
 package org.apache.jackrabbit.oak.index.indexer.document;
 
+import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
+import co.elastic.clients.json.JsonpMapper;
+import jakarta.json.spi.JsonProvider;
+import jakarta.json.stream.JsonGenerator;
 import org.apache.jackrabbit.oak.index.IndexHelper;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
@@ -34,17 +38,19 @@
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.junit.Test;
 
+import java.io.OutputStream;
+
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
 import static org.apache.jackrabbit.oak.InitialContentHelper.INITIAL_CONTENT;
+import static org.mockito.Mockito.when;
 
 public class ElasticIndexerTest {
 
-    private NodeState root = INITIAL_CONTENT;
-
     @Test
     public void nodeIndexed_WithIncludedPaths() throws Exception {
         ElasticIndexDefinitionBuilder idxb = new ElasticIndexDefinitionBuilder();
@@ -52,11 +58,20 @@
         idxb.includedPaths("/content");
 
         NodeState defn = idxb.build();
-        IndexDefinition idxDefn = new ElasticIndexDefinition(root, defn, "/oak:index/testIndex", "testPrefix");
+        IndexDefinition idxDefn = new ElasticIndexDefinition(INITIAL_CONTENT, defn, "/oak:index/testIndex", "testPrefix");
 
-        NodeBuilder builder = root.builder();
+        NodeBuilder builder = INITIAL_CONTENT.builder();
 
-        FulltextIndexWriter indexWriter = new ElasticIndexWriterFactory(mock(ElasticConnection.class),
+        ElasticConnection elasticConnectionMock = mock(ElasticConnection.class);
+        ElasticsearchAsyncClient elasticsearchAsyncClientMock = mock(ElasticsearchAsyncClient.class);
+        JsonpMapper jsonMapperMock = mock(JsonpMapper.class);
+        JsonProvider jsonProviderMock = mock(JsonProvider.class);
+        when(jsonProviderMock.createGenerator(any(OutputStream.class))).thenReturn(mock(JsonGenerator.class));
+        when(jsonMapperMock.jsonProvider()).thenReturn(jsonProviderMock);
+        when(elasticsearchAsyncClientMock._jsonpMapper()).thenReturn(jsonMapperMock);
+        when(elasticConnectionMock.getAsyncClient()).thenReturn(elasticsearchAsyncClientMock);
+
+        FulltextIndexWriter indexWriter = new ElasticIndexWriterFactory(elasticConnectionMock,
                 mock(ElasticIndexTracker.class)).newInstance(idxDefn, defn.builder(), CommitInfo.EMPTY, false);
         ElasticIndexer indexer = new ElasticIndexer(idxDefn, mock(FulltextBinaryTextExtractor.class), builder,
                 mock(IndexingProgressReporter.class), indexWriter, mock(ElasticIndexEditorProvider.class), mock(IndexHelper.class));
diff --git a/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/indexversion/ElasticPurgeOldIndexVersionTest.java b/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/indexversion/ElasticPurgeOldIndexVersionTest.java
index d7bd3b6..6922d32 100644
--- a/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/indexversion/ElasticPurgeOldIndexVersionTest.java
+++ b/oak-run-elastic/src/test/java/org/apache/jackrabbit/oak/indexversion/ElasticPurgeOldIndexVersionTest.java
@@ -46,13 +46,12 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.jackrabbit.commons.JcrUtils.getOrCreateByPath;
 import static org.hamcrest.CoreMatchers.containsString;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 
 public class ElasticPurgeOldIndexVersionTest extends ElasticAbstractIndexCommandTest {
 
diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/index/DocumentStoreIndexerIT.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/index/DocumentStoreIndexerIT.java
index 2b632d6..b895e6c 100644
--- a/oak-run/src/test/java/org/apache/jackrabbit/oak/index/DocumentStoreIndexerIT.java
+++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/index/DocumentStoreIndexerIT.java
@@ -19,12 +19,14 @@
 
 package org.apache.jackrabbit.oak.index;
 
+import com.codahale.metrics.Counter;
 import com.mongodb.client.MongoDatabase;
 import org.apache.jackrabbit.guava.common.collect.Iterators;
 import org.apache.jackrabbit.oak.InitialContent;
 import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.index.indexer.document.CompositeIndexer;
 import org.apache.jackrabbit.oak.index.indexer.document.DocumentStoreIndexer;
+import org.apache.jackrabbit.oak.index.indexer.document.DocumentStoreIndexerBase;
 import org.apache.jackrabbit.oak.index.indexer.document.IndexerConfiguration;
 import org.apache.jackrabbit.oak.index.indexer.document.NodeStateEntry;
 import org.apache.jackrabbit.oak.index.indexer.document.NodeStateIndexer;
@@ -47,6 +49,7 @@
 import org.apache.jackrabbit.oak.plugins.index.lucene.directory.LocalIndexDir;
 import org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneIndexDefinitionBuilder;
 import org.apache.jackrabbit.oak.plugins.index.progress.IndexingProgressReporter;
+import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
@@ -80,6 +83,9 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 
@@ -153,6 +159,7 @@
                 "--doc-traversal-mode",
                 "--checkpoint=" + checkpoint,
                 "--reindex",
+                "--metrics",
                 "--", // -- indicates that options have ended and rest needs to be treated as non option
                 MongoUtils.URL
         };
@@ -381,6 +388,98 @@
     }
 
     @Test
+    public void metrics() throws Exception {
+        MongoConnection mongoConnection = getConnection();
+        DocumentNodeStoreBuilder<?> docBuilder = builderProvider.newBuilder()
+                .setMongoDB(mongoConnection.getMongoClient(), mongoConnection.getDBName());
+        DocumentNodeStore store = docBuilder.build();
+
+        Whiteboard wb = new DefaultWhiteboard();
+        MongoDocumentStore ds = (MongoDocumentStore) docBuilder.getDocumentStore();
+        Registration r1 = wb.register(MongoDocumentStore.class, ds, emptyMap());
+
+        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
+        try {
+            MetricStatisticsProvider metricsStatisticsProvider = new MetricStatisticsProvider(null, executor);
+            wb.register(StatisticsProvider.class, metricsStatisticsProvider, emptyMap());
+            Registration c1Registration = wb.register(MongoDatabase.class, mongoConnection.getDatabase(), emptyMap());
+
+            configureIndex(store);
+
+            NodeBuilder builder = store.getRoot().builder();
+            NodeBuilder appNB = newNode("app:Asset");
+            createChild(appNB,
+                    "jcr:content",
+                    "jcr:content/comments",
+                    "jcr:content/metadata",
+                    "jcr:content/metadata/xmp",
+                    "jcr:content/renditions",
+                    "jcr:content/renditions/original",
+                    "jcr:content/renditions/original/jcr:content"
+            );
+            builder.child("test").setChildNode("book.jpg", appNB.getNodeState());
+            store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+            String checkpoint = store.checkpoint(100000);
+
+            //Shut down this store and restart in readOnly mode
+            store.dispose();
+            r1.unregister();
+            c1Registration.unregister();
+
+            MongoConnection c2 = connectionFactory.getConnection();
+            DocumentNodeStoreBuilder<?> docBuilderRO = builderProvider.newBuilder().setReadOnlyMode()
+                    .setMongoDB(c2.getMongoClient(), c2.getDBName());
+            ds = (MongoDocumentStore) docBuilderRO.getDocumentStore();
+            store = docBuilderRO.build();
+            wb.register(MongoDocumentStore.class, ds, emptyMap());
+            wb.register(MongoDatabase.class, c2.getDatabase(), emptyMap());
+
+            ExtendedIndexHelper helper = new ExtendedIndexHelper(store, store.getBlobStore(), wb, temporaryFolder.newFolder(),
+                    temporaryFolder.newFolder(), List.of(TEST_INDEX_PATH));
+            IndexerSupport support = new IndexerSupport(helper, checkpoint);
+
+            CollectingIndexer testIndexer = new CollectingIndexer(p -> p.startsWith("/test"));
+            DocumentStoreIndexer index = new DocumentStoreIndexer(helper, support) {
+                @Override
+                protected CompositeIndexer prepareIndexers(NodeStore nodeStore, NodeBuilder builder,
+                                                           IndexingProgressReporter progressReporter) {
+                    return new CompositeIndexer(List.of(testIndexer));
+                }
+            };
+
+            index.reindex();
+
+            assertThat(testIndexer.paths, containsInAnyOrder(
+                    "/test",
+                    "/test/book.jpg",
+                    "/test/book.jpg/jcr:content",
+                    "/test/book.jpg/jcr:content/comments",
+                    "/test/book.jpg/jcr:content/metadata",
+                    "/test/book.jpg/jcr:content/metadata/xmp",
+                    "/test/book.jpg/jcr:content/renditions",
+                    "/test/book.jpg/jcr:content/renditions/original",
+                    "/test/book.jpg/jcr:content/renditions/original/jcr:content"
+            ));
+
+            store.dispose();
+
+            SortedMap<String, Counter> counters = metricsStatisticsProvider.getRegistry().getCounters();
+            assertMetric(counters, DocumentStoreIndexerBase.METRIC_INDEXING_DURATION_SECONDS);
+            assertMetric(counters, DocumentStoreIndexerBase.METRIC_MERGE_NODE_STORE_DURATION_SECONDS);
+            assertMetric(counters, DocumentStoreIndexerBase.METRIC_FULL_INDEX_CREATION_DURATION_SECONDS);
+        } finally {
+            executor.shutdown();
+        }
+    }
+
+    private void assertMetric(SortedMap<String, Counter> counters, String metricName) {
+        Counter counter = counters.get(metricName);
+        assertNotNull(counter);
+        LOG.info("{} {}", metricName, counter.getCount());
+    }
+
+    @Test
     @Ignore("OAK-10495")
     public void testParallelIndexing() throws Exception {
         System.setProperty(IndexerConfiguration.PROP_OAK_INDEXER_PARALLEL_INDEX, "true");
diff --git a/oak-search-elastic/pom.xml b/oak-search-elastic/pom.xml
index fd19bfd..981419b 100644
--- a/oak-search-elastic/pom.xml
+++ b/oak-search-elastic/pom.xml
@@ -33,8 +33,8 @@
   <description>Oak Elasticsearch integration subproject</description>
 
   <properties>
-    <elasticsearch.hlrc.version>7.17.13</elasticsearch.hlrc.version>
-    <elasticsearch.java.client.version>8.7.1</elasticsearch.java.client.version>
+    <elasticsearch.java.client.version>8.11.1</elasticsearch.java.client.version>
+    <lucene.version>9.8.0</lucene.version>
   </properties>
 
   <build>
@@ -48,29 +48,20 @@
               org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexProviderService
             </_exportcontents>
             <Import-Package>
+              !io.opentelemetry.sdk.autoconfigure.*,
               !jdk.net.*,
-              !org.apache.avalon.framework.logger.*,
               !org.apache.log.*,
               !org.apache.logging.*,
-              !org.elasticsearch.geometry.*,
-              !org.joda.convert.*,
-              !org.locationtech.jts.geom.*,
-              !org.locationtech.spatial4j.*,
               !sun.misc.*,
-              !org.apache.lucene.search.similarity.*,
               !org.apache.lucene.analysis.*,
               !org.glassfish.json.*,
               !com.sun.management.*,
-              !joptsimple.*,
-              !org.apache.lucene.search.suggest.document.*,
               !jakarta.json.bind.*,
-              !com.carrotsearch.randomizedtesting.*,
-              !org.yaml.snakeyaml.*,
               *
             </Import-Package>
             <Embed-Dependency>
               oak-search;scope=compile|runtime;inline=true,
-              elasticsearch;groupId=org.elasticsearch,
+              elasticsearch-java;groupId=co.elastic.clients,
               *;scope=compile|runtime
             </Embed-Dependency>
             <Embed-Transitive>true</Embed-Transitive>
@@ -122,16 +113,6 @@
     </dependency>
 
     <!-- Elastic/Lucene -->
-    <dependency>
-      <groupId>org.elasticsearch.client</groupId>
-      <artifactId>elasticsearch-rest-high-level-client</artifactId>
-      <version>${elasticsearch.hlrc.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.elasticsearch.client</groupId>
-      <artifactId>elasticsearch-rest-client</artifactId>
-      <version>${elasticsearch.hlrc.version}</version>
-    </dependency>
 	<dependency>
       <groupId>co.elastic.clients</groupId>
       <artifactId>elasticsearch-java</artifactId>
@@ -143,72 +124,14 @@
       <version>${jackson.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.elasticsearch</groupId>
-      <artifactId>elasticsearch</artifactId>
-      <version>${elasticsearch.hlrc.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.elasticsearch</groupId>
-          <artifactId>elasticsearch-cli</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.elasticsearch</groupId>
-          <artifactId>elasticsearch-geo</artifactId>
-        </exclusion>
-
-        <!-- https://github.com/elastic/elasticsearch/issues/29184#issuecomment-662480046 -->
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-backward-codecs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-grouping</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-memory</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-misc</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-queryparser</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-sandbox</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-spatial</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-spatial-extras</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-spatial3d</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.lucene</groupId>
-          <artifactId>lucene-suggest</artifactId>
-        </exclusion>
-        <!--
-          elasticsearch-xcontent depends on snakeyaml 1.33 which is vulnerable to remote code execution
-          (https://nvd.nist.gov/vuln/detail/CVE-2022-1471)
-          elasticsearch-xcontent is used in this module but only with the json parser which does not use snakeyaml
-          so we can safely exclude it.
-          This exclusion, like the others above, can be removed once we will remove the elasticsearch high level rest client
-        -->
-        <exclusion>
-          <groupId>org.yaml</groupId>
-          <artifactId>snakeyaml</artifactId>
-        </exclusion>
-      </exclusions>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>${lucene.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analysis-common</artifactId>
+      <version>${lucene.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.tika</groupId>
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnection.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnection.java
index a677a27..66e3544 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnection.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnection.java
@@ -23,8 +23,6 @@
 import org.apache.http.message.BasicHeader;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestClientBuilder;
-import org.elasticsearch.client.RestHighLevelClient;
-import org.elasticsearch.client.RestHighLevelClientBuilder;
 import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -123,14 +121,12 @@
                             requestConfigBuilder -> requestConfigBuilder.setSocketTimeout(ES_SOCKET_TIMEOUT));
 
                     RestClient httpClient = builder.build();
-                    RestHighLevelClient hlClient = new RestHighLevelClientBuilder(httpClient)
-                            .setApiCompatibilityMode(true).build();
 
                     ElasticsearchTransport transport = new RestClientTransport(
                             httpClient, new JacksonJsonpMapper());
                     ElasticsearchClient esClient = new ElasticsearchClient(transport);
                     ElasticsearchAsyncClient esAsyncClient = new ElasticsearchAsyncClient(transport);
-                    clients = new Clients(esClient, esAsyncClient, hlClient);
+                    clients = new Clients(esClient, esAsyncClient);
                 }
             }
         }
@@ -153,14 +149,6 @@
         return getClients().asyncClient;
     }
 
-    /**
-     * @deprecated
-     * @return the old Elasticsearch client
-     */
-    public RestHighLevelClient getOldClient() {
-        return getClients().rhlClient;
-    }
-
     public String getIndexPrefix() {
         return indexPrefix;
     }
@@ -187,9 +175,6 @@
                 // standard client
                 clients.client._transport().close();
             }
-            if (clients.rhlClient != null) {
-                clients.rhlClient.close();
-            }
         }
         isClosed.set(true);
     }
@@ -217,12 +202,10 @@
     private static class Clients {
         public final ElasticsearchClient client;
         public final ElasticsearchAsyncClient asyncClient;
-        public final RestHighLevelClient rhlClient;
 
-        Clients(ElasticsearchClient client, ElasticsearchAsyncClient asyncClient, RestHighLevelClient rhlClient) {
+        Clients(ElasticsearchClient client, ElasticsearchAsyncClient asyncClient) {
             this.client = client;
             this.asyncClient = asyncClient;
-            this.rhlClient = rhlClient;
         }
     }
 
@@ -271,7 +254,7 @@
         /**
          * This is the final step in charge of building the {@link ElasticConnection}.
          * Validation should be here.
-         *
+         * <p>
          * It adds support for {@link OptionalSteps}.
          */
         public interface BuildStep extends OptionalSteps {
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexDefinition.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexDefinition.java
index 70a9757..82d0374 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexDefinition.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticIndexDefinition.java
@@ -45,17 +45,11 @@
     public static final int BULK_ACTIONS_DEFAULT = 250;
 
     public static final String BULK_SIZE_BYTES = "bulkSizeBytes";
-    public static final long BULK_SIZE_BYTES_DEFAULT = 1 * 1024 * 1024; // 1MB
+    public static final long BULK_SIZE_BYTES_DEFAULT = 1024 * 1024; // 1MB
 
     public static final String BULK_FLUSH_INTERVAL_MS = "bulkFlushIntervalMs";
     public static final long BULK_FLUSH_INTERVAL_MS_DEFAULT = 3000;
 
-    public static final String BULK_RETRIES = "bulkRetries";
-    public static final int BULK_RETRIES_DEFAULT = 3;
-
-    public static final String BULK_RETRIES_BACKOFF = "bulkRetriesBackoff";
-    public static final long BULK_RETRIES_BACKOFF_DEFAULT = 200;
-
     public static final String NUMBER_OF_SHARDS = "numberOfShards";
     public static final int NUMBER_OF_SHARDS_DEFAULT = 1;
 
@@ -82,6 +76,12 @@
     public static final boolean FAIL_ON_ERROR_DEFAULT = true;
 
     /**
+     * When 0, the remote index name is generated dynamically by appending a random suffix to the index name.
+     */
+    public static final String INDEX_NAME_SEED = "indexNameSeed";
+    public static final long INDEX_NAME_SEED_DEFAULT = 0L;
+
+    /**
      * Hidden property for storing a seed value to be used as suffix in remote index name.
      */
     public static final String PROP_INDEX_NAME_SEED = ":nameSeed";
@@ -133,8 +133,6 @@
     public final int bulkActions;
     public final long bulkSizeBytes;
     public final long bulkFlushIntervalMs;
-    public final int bulkRetries;
-    public final long bulkRetriesBackoff;
     private final boolean similarityTagsEnabled;
     private final float similarityTagsBoost;
     public final int numberOfShards;
@@ -143,6 +141,7 @@
     public final Integer trackTotalHits;
     public final String dynamicMapping;
     public final boolean failOnError;
+    public final long indexNameSeed;
 
     private final Map<String, List<PropertyDefinition>> propertiesByName;
     private final List<PropertyDefinition> dynamicBoostProperties;
@@ -157,8 +156,6 @@
         this.bulkActions = getOptionalValue(defn, BULK_ACTIONS, BULK_ACTIONS_DEFAULT);
         this.bulkSizeBytes = getOptionalValue(defn, BULK_SIZE_BYTES, BULK_SIZE_BYTES_DEFAULT);
         this.bulkFlushIntervalMs = getOptionalValue(defn, BULK_FLUSH_INTERVAL_MS, BULK_FLUSH_INTERVAL_MS_DEFAULT);
-        this.bulkRetries = getOptionalValue(defn, BULK_RETRIES, BULK_RETRIES_DEFAULT);
-        this.bulkRetriesBackoff = getOptionalValue(defn, BULK_RETRIES_BACKOFF, BULK_RETRIES_BACKOFF_DEFAULT);
         this.numberOfShards = getOptionalValue(defn, NUMBER_OF_SHARDS, NUMBER_OF_SHARDS_DEFAULT);
         this.numberOfReplicas = getOptionalValue(defn, NUMBER_OF_REPLICAS, NUMBER_OF_REPLICAS_DEFAULT);
         this.similarityTagsEnabled = getOptionalValue(defn, SIMILARITY_TAGS_ENABLED, SIMILARITY_TAGS_ENABLED_DEFAULT);
@@ -170,6 +167,7 @@
         this.failOnError = getOptionalValue(defn, FAIL_ON_ERROR,
                 Boolean.parseBoolean(System.getProperty(TYPE_ELASTICSEARCH + "." + FAIL_ON_ERROR, Boolean.toString(FAIL_ON_ERROR_DEFAULT)))
         );
+        this.indexNameSeed = getOptionalValue(defn, INDEX_NAME_SEED, INDEX_NAME_SEED_DEFAULT);
         this.similarityTagsFields = getOptionalValues(defn, SIMILARITY_TAGS_FIELDS, Type.STRINGS, String.class, SIMILARITY_TAGS_FIELDS_DEFAULT);
 
         this.propertiesByName = getDefinedRules()
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandler.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandler.java
index 035fd7b..b822a7b 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandler.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandler.java
@@ -16,6 +16,13 @@
  */
 package org.apache.jackrabbit.oak.plugins.index.elastic.index;
 
+import co.elastic.clients.elasticsearch._helpers.bulk.BulkIngester;
+import co.elastic.clients.elasticsearch._helpers.bulk.BulkListener;
+import co.elastic.clients.elasticsearch._types.ErrorCause;
+import co.elastic.clients.elasticsearch.core.BulkRequest;
+import co.elastic.clients.elasticsearch.core.BulkResponse;
+import co.elastic.clients.elasticsearch.core.bulk.BulkOperation;
+import co.elastic.clients.elasticsearch.core.bulk.BulkResponseItem;
 import org.apache.jackrabbit.oak.api.PropertyState;
 import org.apache.jackrabbit.oak.api.Type;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
@@ -23,37 +30,23 @@
 import org.apache.jackrabbit.oak.plugins.index.search.IndexDefinition;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.DocWriteRequest;
-import org.elasticsearch.action.bulk.BackoffPolicy;
-import org.elasticsearch.action.bulk.BulkItemResponse;
-import org.elasticsearch.action.bulk.BulkProcessor;
-import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.client.RequestOptions;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.core.TimeValue;
 import org.jetbrains.annotations.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.LinkedHashSet;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.Phaser;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 
-import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
-import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
-
 class ElasticBulkProcessorHandler {
 
     private static final Logger LOG = LoggerFactory.getLogger(ElasticBulkProcessorHandler.class);
@@ -68,13 +61,13 @@
     protected final String indexName;
     protected final ElasticIndexDefinition indexDefinition;
     private final NodeBuilder definitionBuilder;
-    protected final BulkProcessor bulkProcessor;
+    protected final BulkIngester<String> bulkIngester;
     private final boolean waitForESAcknowledgement;
 
     /**
      * Coordinates communication between bulk processes. It has a main controller registered at creation time and
      * de-registered on {@link ElasticIndexWriter#close(long)}. Each bulk request registers a new party in
-     * this Phaser in {@link OakBulkProcessorListener#beforeBulk(long, BulkRequest)} and de-register itself when
+     * this Phaser in {@link OakBulkListener#beforeBulk(long, BulkRequest, List)} and de-registers itself when
      * the request returns.
      */
     private final Phaser phaser = new Phaser(1); // register main controller
@@ -82,7 +75,7 @@
     /**
      * Errors that occurred while trying to update the index in Elasticsearch
      */
-    private final ConcurrentLinkedQueue<Throwable> suppressedExceptions = new ConcurrentLinkedQueue<>();
+    private final ConcurrentLinkedQueue<ErrorCause> suppressedErrorCauses = new ConcurrentLinkedQueue<>();
 
     /**
      * Key-value structure to keep the history of bulk requests. Keys are the bulk execution ids, the boolean
@@ -102,7 +95,7 @@
         this.indexDefinition = indexDefinition;
         this.definitionBuilder = definitionBuilder;
         this.waitForESAcknowledgement = waitForESAcknowledgement;
-        this.bulkProcessor = initBulkProcessor();
+        this.bulkIngester = initBulkIngester();
     }
 
     /**
@@ -142,44 +135,57 @@
         return new ElasticBulkProcessorHandler(elasticConnection, indexName, indexDefinition, definitionBuilder, waitForESAcknowledgement);
     }
 
-    private BulkProcessor initBulkProcessor() {
-        return BulkProcessor.builder(requestConsumer(),
-                new OakBulkProcessorListener(), this.indexName + "-bulk-processor")
-                .setBulkActions(indexDefinition.bulkActions)
-                .setConcurrentRequests(BULK_PROCESSOR_CONCURRENCY)
-                .setBulkSize(new ByteSizeValue(indexDefinition.bulkSizeBytes))
-                .setFlushInterval(TimeValue.timeValueMillis(indexDefinition.bulkFlushIntervalMs))
-                .setBackoffPolicy(BackoffPolicy.exponentialBackoff(
-                        TimeValue.timeValueMillis(indexDefinition.bulkRetriesBackoff), indexDefinition.bulkRetries)
-                )
-                .build();
+    private BulkIngester<String> initBulkIngester() {
+        // BulkIngester does not support retry policies. However, some retries are already implemented in the transport layer.
+        // More details here: https://github.com/elastic/elasticsearch-java/issues/478
+        return BulkIngester.of(b -> {
+            b = b.client(elasticConnection.getAsyncClient())
+                    .listener(new OakBulkListener());
+            if (indexDefinition.bulkActions > 0) {
+                b = b.maxOperations(indexDefinition.bulkActions);
+            }
+            if (indexDefinition.bulkSizeBytes > 0) {
+                b = b.maxSize(indexDefinition.bulkSizeBytes);
+            }
+            if (indexDefinition.bulkFlushIntervalMs > 0) {
+                b = b.flushInterval(indexDefinition.bulkFlushIntervalMs, TimeUnit.MILLISECONDS);
+            }
+            return b.maxConcurrentRequests(BULK_PROCESSOR_CONCURRENCY);
+        });
     }
 
     private void checkFailures() throws IOException {
-        if (!suppressedExceptions.isEmpty()) {
+        if (!suppressedErrorCauses.isEmpty()) {
             IOException ioe = new IOException("Exception while indexing. See suppressed for details");
-            suppressedExceptions.forEach(ioe::addSuppressed);
+            suppressedErrorCauses.stream().map(ec -> new IllegalStateException(ec.reason())).forEach(ioe::addSuppressed);
             throw ioe;
         }
     }
 
-    protected BiConsumer<BulkRequest, ActionListener<BulkResponse>> requestConsumer() {
-        // TODO: migrate to ES Java client https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current/indexing-bulk.html
-        return (request, bulkListener) -> elasticConnection.getOldClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener);
+    public void update(String id, ElasticDocument document) throws IOException {
+        add(BulkOperation.of(op -> op.index(idx -> idx.index(indexName).id(id).document(document))), id);
     }
 
-    public void add(DocWriteRequest<?> request) throws IOException {
+    public void delete(String id) throws IOException {
+        add(BulkOperation.of(op -> op.delete(idx -> idx.index(indexName).id(id))), id);
+    }
+
+    private void add(BulkOperation operation, String context) throws IOException {
         // fail fast: we don't want to wait until the processor gets closed to fail
         checkFailures();
-
-        bulkProcessor.add(request);
+        bulkIngester.add(operation, context);
         totalOperations++;
     }
 
+    /**
+     * Closes the bulk ingester and waits for all the bulk requests to return.
+     * @return {@code true} if at least one update was performed, {@code false} otherwise
+     * @throws IOException if an error happened while processing the bulk requests
+     */
     public boolean close() throws IOException {
-        LOG.trace("Calling close on bulk processor {}", bulkProcessor);
-        bulkProcessor.close();
-        LOG.trace("Bulk Processor {} closed", bulkProcessor);
+        LOG.trace("Calling close on bulk ingester {}", bulkIngester);
+        bulkIngester.close();
+        LOG.trace("Bulk Ingester {} closed", bulkIngester);
 
         // de-register main controller
         int phase = phaser.arriveAndDeregister();
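
For reference, a minimal sketch of how the new BulkIngester from the Elasticsearch Java client is wired and used, mirroring the hunk above; the client wiring, index name, id and document are illustrative assumptions, not part of this patch:

    import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
    import co.elastic.clients.elasticsearch._helpers.bulk.BulkIngester;
    import co.elastic.clients.elasticsearch.core.bulk.BulkOperation;

    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    class BulkIngesterSketch {
        // Hypothetical wiring: in the patch the async client comes from ElasticConnection#getAsyncClient()
        static void indexAndClose(ElasticsearchAsyncClient asyncClient) throws Exception {
            BulkIngester<String> ingester = BulkIngester.of(b -> b
                    .client(asyncClient)
                    .maxOperations(250)                          // flush after 250 buffered operations...
                    .maxSize(1024 * 1024)                        // ...or after 1 MiB of payload...
                    .flushInterval(3000, TimeUnit.MILLISECONDS)  // ...or every 3 seconds
                    .maxConcurrentRequests(1));

            // the second argument is the per-operation context (here the document id); it is echoed
            // back to the BulkListener callbacks and used for error reporting
            Map<String, Object> doc = Map.of(":path", "/content/foo");
            ingester.add(BulkOperation.of(op -> op.index(idx -> idx
                    .index("my-index")
                    .id("doc-1")
                    .document(doc))), "doc-1");

            ingester.close(); // flushes pending operations and waits for in-flight requests
        }
    }
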
@@ -192,8 +198,11 @@
         if (waitForESAcknowledgement) {
             try {
                 phaser.awaitAdvanceInterruptibly(phase, indexDefinition.bulkFlushIntervalMs * 5, TimeUnit.MILLISECONDS);
-            } catch (InterruptedException | TimeoutException e) {
+            } catch (TimeoutException e) {
                 LOG.error("Error waiting for bulk requests to return", e);
+            } catch (InterruptedException e) {
+                LOG.warn("Interrupted while waiting for bulk processor to close", e);
+                Thread.currentThread().interrupt();  // restore interrupt status
             }
         }
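
The Phaser handshake described in the Javadoc above (main controller registered up front, one extra party per in-flight bulk, close() waiting for everyone to arrive) boils down to the following pattern; the timeout value is illustrative:

    import java.util.concurrent.Phaser;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    class PhaserCoordinationSketch {
        private final Phaser phaser = new Phaser(1); // party #1: the main controller

        void onBeforeBulk() {
            phaser.register();            // every outgoing bulk request joins as an extra party
        }

        void onBulkReturned() {
            phaser.arriveAndDeregister(); // the request leaves once a response or failure arrives
        }

        void close() throws InterruptedException {
            // the main controller leaves; the phase advances only when all bulk parties have left too
            int phase = phaser.arriveAndDeregister();
            try {
                phaser.awaitAdvanceInterruptibly(phase, 30, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                // stop waiting; any still-pending requests will complete asynchronously
            }
        }
    }
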
 
@@ -205,91 +214,96 @@
         return updatesMap.containsValue(Boolean.TRUE);
     }
 
-    private class OakBulkProcessorListener implements BulkProcessor.Listener {
+    private class OakBulkListener implements BulkListener<String> {
 
         @Override
-        public void beforeBulk(long executionId, BulkRequest bulkRequest) {
+        public void beforeBulk(long executionId, BulkRequest request, List<String> contexts) {
             // register new bulk party
             phaser.register();
 
             // init update status
             updatesMap.put(executionId, Boolean.FALSE);
 
-            bulkRequest.timeout(TimeValue.timeValueMinutes(2));
-
-            LOG.debug("Sending bulk with id {} -> {}", executionId, bulkRequest.getDescription());
+            LOG.debug("Sending bulk with id {} -> {}", executionId, contexts);
             if (LOG.isTraceEnabled()) {
-                LOG.trace("Bulk Requests: \n{}", bulkRequest.requests()
+                LOG.trace("Bulk Requests: \n{}", request.operations()
                         .stream()
-                        .map(DocWriteRequest::toString)
+                        .map(BulkOperation::toString)
                         .collect(Collectors.joining("\n"))
                 );
             }
         }
 
         @Override
-        public void afterBulk(long executionId, BulkRequest bulkRequest, BulkResponse bulkResponse) {
-            LOG.debug("Bulk with id {} processed with status {} in {}", executionId, bulkResponse.status(), bulkResponse.getTook());
-            if (LOG.isTraceEnabled()) {
-                try {
-                    LOG.trace(Strings.toString(bulkResponse.toXContent(jsonBuilder(), EMPTY_PARAMS)));
-                } catch (IOException e) {
-                    LOG.error("Error decoding bulk response", e);
+        public void afterBulk(long executionId, BulkRequest request, List<String> contexts, BulkResponse response) {
+            try {
+                LOG.debug("Bulk with id {} processed in {} ms", executionId, response.took());
+                if (LOG.isTraceEnabled()) {
+                    LOG.trace(response.toString());
                 }
-            }
-            if (bulkResponse.hasFailures()) { // check if some operations failed to execute
-                Set<String> failedDocSet = new LinkedHashSet<>();
-                NodeBuilder status = definitionBuilder.child(IndexDefinition.STATUS_NODE);
-                // Read the current failed paths (if any) on the :status node into failedDocList
-                if (status.hasProperty(IndexDefinition.FAILED_DOC_PATHS)) {
-                    for (String str : status.getProperty(IndexDefinition.FAILED_DOC_PATHS).getValue(Type.STRINGS)) {
-                        failedDocSet.add(str);
-                    }
-                }
-
-                int initialSize = failedDocSet.size();
-                boolean isFailedDocSetFull = false;
-
-                boolean hasSuccesses = false;
-                for (BulkItemResponse bulkItemResponse : bulkResponse) {
-                    if (bulkItemResponse.isFailed()) {
-                        BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
-                        if (indexDefinition.failOnError && failure.getCause() != null) {
-                            suppressedExceptions.add(failure.getCause());
+                if (response.items().stream().anyMatch(i -> i.error() != null)) { // check if some operations failed to execute
+                    Set<String> failedDocSet = new LinkedHashSet<>();
+                    NodeBuilder status = definitionBuilder.child(IndexDefinition.STATUS_NODE);
+                    // Read the current failed paths (if any) on the :status node into failedDocList
+                    if (status.hasProperty(IndexDefinition.FAILED_DOC_PATHS)) {
+                        for (String str : status.getProperty(IndexDefinition.FAILED_DOC_PATHS).getValue(Type.STRINGS)) {
+                            failedDocSet.add(str);
                         }
-                        if (!isFailedDocSetFull && failedDocSet.size() < FAILED_DOC_COUNT_FOR_STATUS_NODE) {
-                            failedDocSet.add(bulkItemResponse.getId());
-                        } else {
-                            isFailedDocSetFull = true;
-                        }
-                        // Log entry to be used to parse logs to get the failed doc id/path if needed
-                        LOG.error("ElasticIndex Update Doc Failure: Error while adding/updating doc with id: [{}]", bulkItemResponse.getId());
-                        LOG.error("Failure Details: BulkItem ID: {}, Index: {}, Failure Cause: {}",
-                                failure.getId(), failure.getIndex(), failure.getCause());
-                    } else if (!hasSuccesses) {
-                        // Set indexUpdated to true even if 1 item was updated successfully
-                        updatesMap.put(executionId, Boolean.TRUE);
-                        hasSuccesses = true;
                     }
-                }
 
-                if (isFailedDocSetFull) {
-                    LOG.info("Cannot store all new Failed Docs because {} has been filled up. " +
-                            "See previous log entries to find out the details of failed paths", IndexDefinition.FAILED_DOC_PATHS);
-                } else if (failedDocSet.size() != initialSize) {
-                    status.setProperty(IndexDefinition.FAILED_DOC_PATHS, failedDocSet, Type.STRINGS);
+                    int initialSize = failedDocSet.size();
+                    boolean isFailedDocSetFull = false;
+
+                    boolean hasSuccesses = false;
+                    for (int i = 0; i < contexts.size(); i++) {
+                        BulkResponseItem item = response.items().get(i);
+                        if (item.error() != null) {
+                            if (indexDefinition.failOnError) {
+                                suppressedErrorCauses.add(item.error());
+                            }
+                            if (!isFailedDocSetFull && failedDocSet.size() < FAILED_DOC_COUNT_FOR_STATUS_NODE) {
+                                failedDocSet.add(contexts.get(i));
+                            } else {
+                                isFailedDocSetFull = true;
+                            }
+                            // Log entry to be used to parse logs to get the failed doc id/path if needed
+                            LOG.error("ElasticIndex Update Doc Failure: Error while adding/updating doc with id: [{}]", contexts.get(i));
+                            LOG.error("Failure Details: BulkItem ID: {}, Index: {}, Failure Cause: {}",
+                                    item.id(), item.index(), item.error());
+                        } else if (!hasSuccesses) {
+                            // Set indexUpdated to true even if 1 item was updated successfully
+                            updatesMap.put(executionId, Boolean.TRUE);
+                            hasSuccesses = true;
+                        }
+                    }
+
+                    if (isFailedDocSetFull) {
+                        LOG.info("Cannot store all new Failed Docs because {} has been filled up. " +
+                                "See previous log entries to find out the details of failed paths", IndexDefinition.FAILED_DOC_PATHS);
+                    } else if (failedDocSet.size() != initialSize) {
+                        status.setProperty(IndexDefinition.FAILED_DOC_PATHS, failedDocSet, Type.STRINGS);
+                    }
+                } else {
+                    updatesMap.put(executionId, Boolean.TRUE);
                 }
-            } else {
-                updatesMap.put(executionId, Boolean.TRUE);
+            } finally {
+                phaser.arriveAndDeregister();
             }
-            phaser.arriveAndDeregister();
         }
 
         @Override
-        public void afterBulk(long executionId, BulkRequest bulkRequest, Throwable throwable) {
-            LOG.error("ElasticIndex Update Bulk Failure : Bulk with id {} threw an error", executionId, throwable);
-            suppressedExceptions.add(throwable);
-            phaser.arriveAndDeregister();
+        public void afterBulk(long executionId, BulkRequest request, List<String> contexts, Throwable failure) {
+            try {
+                LOG.error("ElasticIndex Update Bulk Failure : Bulk with id {} threw an error", executionId, failure);
+                suppressedErrorCauses.add(ErrorCause.of(ec -> {
+                    StringWriter sw = new StringWriter();
+                    PrintWriter pw = new PrintWriter(sw);
+                    failure.printStackTrace(pw);
+                    return ec.reason(failure.getMessage()).stackTrace(sw.toString());
+                }));
+            } finally {
+                phaser.arriveAndDeregister();
+            }
         }
     }
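
A detail worth calling out in the listener above: the contexts passed to add() line up positionally with response.items(), which is how a failed item is mapped back to the document id that produced it. A minimal sketch of that pairing (method name and output are illustrative):

    import co.elastic.clients.elasticsearch.core.BulkResponse;
    import co.elastic.clients.elasticsearch.core.bulk.BulkResponseItem;

    import java.util.List;

    class BulkErrorPairingSketch {
        // contexts.get(i) is the id passed to BulkIngester#add for the i-th operation of this bulk
        static void logFailures(List<String> contexts, BulkResponse response) {
            for (int i = 0; i < contexts.size(); i++) {
                BulkResponseItem item = response.items().get(i);
                if (item.error() != null) {
                    System.err.printf("operation for %s failed: %s%n", contexts.get(i), item.error().reason());
                }
            }
        }
    }
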
 
@@ -297,12 +311,14 @@
      * {@link ElasticBulkProcessorHandler} extension with real time behaviour.
      * It also uses the same async bulk processor as the parent except for the last flush that waits until the
      * indexed documents are searchable.
+     * <p>
+     * BulkIngester does not support customization of intermediate requests. This means we cannot intercept the last
+     * request and apply a WAIT_UNTIL refresh policy. The workaround is to force a refresh when the handler is closed.
+     * We can improve this once the following issue is fixed:
+     * <a href="https://github.com/elastic/elasticsearch-java/issues/703">elasticsearch-java#703</a>
      */
     protected static class RealTimeBulkProcessorHandler extends ElasticBulkProcessorHandler {
 
-        private final AtomicBoolean isClosed = new AtomicBoolean(false);
-        private final AtomicBoolean isDataSearchable = new AtomicBoolean(false);
-
         private RealTimeBulkProcessorHandler(@NotNull ElasticConnection elasticConnection,
                                              @NotNull String indexName,
                                              @NotNull ElasticIndexDefinition indexDefinition,
@@ -312,26 +328,12 @@
         }
 
         @Override
-        protected BiConsumer<BulkRequest, ActionListener<BulkResponse>> requestConsumer() {
-            return (request, bulkListener) -> {
-                if (isClosed.get()) {
-                    LOG.debug("Processor is closing. Next request with {} actions will block until the data is searchable",
-                            request.requests().size());
-                    request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
-                    isDataSearchable.set(true);
-                }
-                elasticConnection.getOldClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener);
-            };
-        }
-
-        @Override
         public boolean close() throws IOException {
-            isClosed.set(true);
             // calling super closes the bulk ingester. If not empty, it flushes the remaining operations one last time
             boolean closed = super.close();
             // the ingester may have flushed the last bulk before close gets called, so whenever operations were
             // indexed we trigger an explicit refresh to make sure the docs are searchable before returning
-            if (totalOperations > 0 && !isDataSearchable.get()) {
+            if (totalOperations > 0) {
                 LOG.debug("Forcing refresh");
                 try {
                 	this.elasticConnection.getClient().indices().refresh(b -> b.index(indexName));
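
As the comment in RealTimeBulkProcessorHandler explains, the last bulk can no longer carry a WAIT_UNTIL refresh policy, so searchability is guaranteed with an explicit refresh after close. A minimal sketch of that call with the Java client (the client wiring is illustrative):

    import co.elastic.clients.elasticsearch.ElasticsearchClient;

    import java.io.IOException;

    class ForcedRefreshSketch {
        // makes every document indexed so far visible to queries on the given index
        static void makeSearchable(ElasticsearchClient client, String indexName) throws IOException {
            client.indices().refresh(r -> r.index(indexName));
        }
    }
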
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java
index 4e2c26d..9eb4e8f 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java
@@ -37,11 +37,11 @@
 import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.CharFilterFactory;
+import org.apache.lucene.analysis.TokenFilterFactory;
 import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
-import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.util.ResourceLoader;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzerMappings.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzerMappings.java
index ea79c50..e8adc6f 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzerMappings.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzerMappings.java
@@ -16,6 +16,7 @@
  */
 package org.apache.jackrabbit.oak.plugins.index.elastic.index;
 
+import org.apache.lucene.analysis.AbstractAnalysisFactory;
 import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
 import org.apache.lucene.analysis.cjk.CJKBigramFilterFactory;
 import org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory;
@@ -33,7 +34,6 @@
 import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory;
 import org.apache.lucene.analysis.shingle.ShingleFilterFactory;
 import org.apache.lucene.analysis.synonym.SynonymFilterFactory;
-import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
 import org.apache.lucene.analysis.util.ElisionFilterFactory;
 import org.jetbrains.annotations.Nullable;
 
@@ -76,7 +76,7 @@
     static {
         CONTENT_TRANSFORMERS = new LinkedHashMap<>();
         CONTENT_TRANSFORMERS.put("mapping", line -> {
-            if (line.length() == 0 || line.startsWith("#")) {
+            if (line.isEmpty() || line.startsWith("#")) {
                 return null;
             } else {
                 return line.replaceAll("\"", "");
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticDocument.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticDocument.java
index c81d2c2..06aa073 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticDocument.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticDocument.java
@@ -16,16 +16,14 @@
  */
 package org.apache.jackrabbit.oak.plugins.index.elastic.index;
 
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.index.search.FieldNames;
 import org.apache.jackrabbit.oak.plugins.index.search.spi.binary.BlobByteSource;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Map;
@@ -35,17 +33,23 @@
 
 import static org.apache.jackrabbit.oak.plugins.index.elastic.util.ElasticIndexUtils.toDoubles;
 
+@JsonInclude(JsonInclude.Include.NON_EMPTY)
 public class ElasticDocument {
-    private static final Logger LOG = LoggerFactory.getLogger(ElasticDocument.class);
 
-    private final String path;
-    private final Set<String> fulltext;
-    private final Set<String> suggest;
-    private final Set<String> spellcheck;
-    private final Map<String, Set<Object>> properties;
-    private final Map<String, Object> similarityFields;
-    private final Map<String, Map<String, Double>> dynamicBoostFields;
-    private final Set<String> similarityTags;
+    @JsonProperty(FieldNames.PATH)
+    public final String path;
+    @JsonProperty(FieldNames.FULLTEXT)
+    public final Set<String> fulltext;
+    @JsonProperty(FieldNames.SUGGEST)
+    public final Set<Map<String, String>> suggest;
+    @JsonProperty(FieldNames.SPELLCHECK)
+    public final Set<String> spellcheck;
+    @JsonProperty(ElasticIndexDefinition.DYNAMIC_BOOST_FULLTEXT)
+    public final Set<String> dbFullText;
+    @JsonProperty(ElasticIndexDefinition.SIMILARITY_TAGS)
+    public final Set<String> similarityTags;
+    // these are dynamic properties that need to be added to the document unwrapped. See the use of @JsonAnyGetter in the getter
+    private final Map<String, Object> properties;
 
     ElasticDocument(String path) {
         this.path = path;
@@ -53,8 +57,7 @@
         this.suggest = new LinkedHashSet<>();
         this.spellcheck = new LinkedHashSet<>();
         this.properties = new HashMap<>();
-        this.similarityFields = new HashMap<>();
-        this.dynamicBoostFields = new HashMap<>();
+        this.dbFullText = new LinkedHashSet<>();
         this.similarityTags = new LinkedHashSet<>();
     }
 
@@ -67,24 +70,40 @@
     }
 
     void addSuggest(String value) {
-        suggest.add(value);
+        suggest.add(Map.of(ElasticIndexHelper.SUGGEST_NESTED_VALUE, value));
     }
 
     void addSpellcheck(String value) {
         spellcheck.add(value);
     }
 
-    // ES for String values (that are not interpreted as date or numbers etc) would analyze in the same
+    // For String values (that are not interpreted as dates or numbers etc.), ES would analyze them in the same
     // field and would index a sub-field "keyword" for non-analyzed value.
     // ref: https://www.elastic.co/blog/strings-are-dead-long-live-strings
-    // (interpretation of date etc: https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic-field-mapping.html)
+    // (interpretation of date etc.: https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic-field-mapping.html)
     void addProperty(String fieldName, Object value) {
-        properties.computeIfAbsent(fieldName, s -> new LinkedHashSet<>()).add(value);
+        Object existingValue = properties.get(fieldName);
+        Object finalValue;
+
+        if (existingValue == null) {
+            finalValue = value;
+        } else if (existingValue instanceof Set) {
+            Set<Object> existingSet = (Set<Object>) existingValue;
+            existingSet.add(value);
+            finalValue = existingSet;
+        } else {
+            Set<Object> set = new LinkedHashSet<>();
+            set.add(existingValue);
+            set.add(value);
+            finalValue = set.size() == 1 ? set.iterator().next() : set;
+        }
+
+        properties.put(fieldName, finalValue);
     }
 
     void addSimilarityField(String name, Blob value) throws IOException {
         byte[] bytes = new BlobByteSource(value).read();
-        similarityFields.put(FieldNames.createSimilarityFieldName(name), toDoubles(bytes));
+        addProperty(FieldNames.createSimilarityFieldName(name), toDoubles(bytes));
     }
 
     void indexAncestors(String path) {
@@ -96,76 +115,25 @@
     }
 
     void addDynamicBoostField(String propName, String value, double boost) {
-        dynamicBoostFields.computeIfAbsent(propName, s -> new HashMap<>())
-                .putIfAbsent(value, boost);
+        addProperty(propName,
+                Map.of(
+                        ElasticIndexHelper.DYNAMIC_BOOST_NESTED_VALUE, value,
+                        ElasticIndexHelper.DYNAMIC_BOOST_NESTED_BOOST, boost
+                )
+        );
+
+        // add the value to the dynamic boost specific fulltext field. We cannot add this to the standard
+        // field since dynamic boosted terms require lower weight compared to standard terms
+        dbFullText.add(value);
     }
 
     void addSimilarityTag(String value) {
         similarityTags.add(value);
     }
 
-    public String build() {
-        String ret;
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder();
-            builder.startObject();
-            {
-                builder.field(FieldNames.PATH, path);
-                Set<String> dbFullText = new LinkedHashSet<>();
-                for (Map.Entry<String, Map<String, Double>> f : dynamicBoostFields.entrySet()) {
-                    builder.startArray(f.getKey());
-                    for (Map.Entry<String, Double> v : f.getValue().entrySet()) {
-                        builder.startObject();
-                        builder.field("value", v.getKey());
-                        builder.field("boost", v.getValue());
-                        builder.endObject();
-                        // add value into the dynamic boost specific fulltext field. We cannot add this in the standard
-                        // field since dynamic boosted terms require lower weight compared to standard terms
-                        dbFullText.add(v.getKey());
-                    }
-                    builder.endArray();
-                }
-                if (dbFullText.size() > 0) {
-                    builder.field(ElasticIndexDefinition.DYNAMIC_BOOST_FULLTEXT, dbFullText);
-                }
-                if (fulltext.size() > 0) {
-                    builder.field(FieldNames.FULLTEXT, fulltext);
-                }
-                if (suggest.size() > 0) {
-                    builder.startArray(FieldNames.SUGGEST);
-                    for (String val : suggest) {
-                        builder.startObject().field("value", val).endObject();
-                    }
-                    builder.endArray();
-                }
-                if (spellcheck.size() > 0) {
-                    builder.field(FieldNames.SPELLCHECK, spellcheck);
-                }
-                for (Map.Entry<String, Object> simProp: similarityFields.entrySet()) {
-                    builder.field(simProp.getKey(), simProp.getValue());
-                }
-                for (Map.Entry<String, Set<Object>> prop : properties.entrySet()) {
-                    builder.field(prop.getKey(), prop.getValue().size() == 1 ? prop.getValue().iterator().next() : prop.getValue());
-                }
-                if (!similarityTags.isEmpty()) {
-                    builder.field(ElasticIndexDefinition.SIMILARITY_TAGS, similarityTags);
-                }
-            }
-            builder.endObject();
-
-            ret = Strings.toString(builder);
-        } catch (IOException e) {
-            LOG.error("Error serializing document - path: {}, properties: {}, fulltext: {}, suggest: {}",
-                    path, properties, fulltext, suggest, e);
-            ret = null;
-        }
-
-        return ret;
-    }
-
-    @Override
-    public String toString() {
-        return build();
+    @JsonAnyGetter
+    public Map<String, Object> getProperties() {
+        return properties;
     }
 
 }
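
ElasticDocument now relies on Jackson instead of hand-built XContent: @JsonProperty maps the fixed fields, @JsonInclude(NON_EMPTY) drops empty collections, and @JsonAnyGetter flattens the dynamic properties map into top-level JSON fields. A minimal standalone sketch of the same pattern (class, field names and values are illustrative, not taken from the patch):

    import com.fasterxml.jackson.annotation.JsonAnyGetter;
    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.Map;
    import java.util.Set;

    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    class DocumentSketch {
        @JsonProperty(":path")
        public final String path = "/content/foo";

        @JsonProperty(":fulltext")
        public final Set<String> fulltext = new LinkedHashSet<>(); // empty, so NON_EMPTY drops it

        private final Map<String, Object> properties = new LinkedHashMap<>();

        @JsonAnyGetter // entries of this map are serialized unwrapped, as top-level JSON fields
        public Map<String, Object> getProperties() {
            return properties;
        }

        public static void main(String[] args) throws Exception {
            DocumentSketch doc = new DocumentSketch();
            doc.getProperties().put("title", "hello");
            doc.getProperties().put("jcr:primaryType", "nt:unstructured");
            // prints {":path":"/content/foo","title":"hello","jcr:primaryType":"nt:unstructured"}
            System.out.println(new ObjectMapper().writeValueAsString(doc));
        }
    }
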
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexHelper.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexHelper.java
index 187f49b..254d3a0 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexHelper.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexHelper.java
@@ -23,6 +23,7 @@
 import co.elastic.clients.elasticsearch.indices.CreateIndexRequest;
 import co.elastic.clients.elasticsearch.indices.IndexSettings;
 import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.elasticsearch.indices.PutIndicesSettingsRequest;
 import co.elastic.clients.json.JsonData;
 import co.elastic.clients.util.ObjectBuilder;
 import org.apache.jackrabbit.oak.api.Type;
@@ -30,8 +31,6 @@
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticPropertyDefinition;
 import org.apache.jackrabbit.oak.plugins.index.search.FieldNames;
 import org.apache.jackrabbit.oak.plugins.index.search.PropertyDefinition;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.elasticsearch.common.settings.Settings;
 import org.jetbrains.annotations.NotNull;
 
 import java.io.Reader;
@@ -59,10 +58,17 @@
     // Unset the refresh interval and disable replicas at index creation to optimize for initial loads
     // https://www.elastic.co/guide/en/elasticsearch/reference/current/tune-for-indexing-speed.html
     private static final Time INITIAL_REFRESH_INTERVAL = Time.of(b -> b.time("-1"));
+
     private static final String INITIAL_NUMBER_OF_REPLICAS = "0";
 
     private static final String OAK_WORD_DELIMITER_GRAPH_FILTER = "oak_word_delimiter_graph_filter";
 
+    protected static final String SUGGEST_NESTED_VALUE = "value";
+
+    protected static final String DYNAMIC_BOOST_NESTED_VALUE = "value";
+
+    protected static final String DYNAMIC_BOOST_NESTED_BOOST = "boost";
+
     /**
      * Returns a {@code CreateIndexRequest} with settings and mappings translated from the specified {@code ElasticIndexDefinition}.
      * The returned object can be used to create an index optimized for bulk loads (eg: reindexing) but not for queries.
@@ -121,29 +127,28 @@
 
 
     /**
-     * Returns a {@code UpdateSettingsRequest} to make an index ready to be queried and updated in near real time.
+     * Returns a {@code PutIndicesSettingsRequest} to make an index ready to be queried and updated in near real time.
      *
      * @param remoteIndexName the final index name (no alias)
      * @param indexDefinition the definition used to read settings/mappings
-     * @return an {@code UpdateSettingsRequest}
-     * <p>
-     * TODO: migrate to Elasticsearch Java client when the following issue will be fixed
-     * <a href="https://github.com/elastic/elasticsearch-java/issues/283">https://github.com/elastic/elasticsearch-java/issues/283</a>
+     * @return a {@code PutIndicesSettingsRequest}
      */
-    public static UpdateSettingsRequest enableIndexRequest(String remoteIndexName, ElasticIndexDefinition indexDefinition) {
-        UpdateSettingsRequest request = new UpdateSettingsRequest(remoteIndexName);
+    public static PutIndicesSettingsRequest enableIndexRequest(String remoteIndexName, ElasticIndexDefinition indexDefinition) {
+        IndexSettings indexSettings = IndexSettings.of(is -> is
+                .numberOfReplicas(Integer.toString(indexDefinition.numberOfReplicas))
+                // TODO: we should pass null to reset the refresh interval to the default value but the following bug prevents it. We need to wait for a fix
+                // <a href="https://github.com/elastic/elasticsearch-java/issues/283">https://github.com/elastic/elasticsearch-java/issues/283</a>
+                .refreshInterval(Time.of(t -> t.time("1s"))));
 
-        Settings.Builder settingsBuilder = Settings.builder()
-                .putNull("index.refresh_interval") // null=reset a setting back to the default value
-                .put("index.number_of_replicas", indexDefinition.numberOfReplicas);
-
-        return request.settings(settingsBuilder);
+        return PutIndicesSettingsRequest.of(pisr -> pisr
+                .index(remoteIndexName)
+                .settings(indexSettings));
     }
 
 
     private static ObjectBuilder<IndexSettings> loadSettings(@NotNull IndexSettings.Builder builder,
                                                              @NotNull ElasticIndexDefinition indexDefinition) {
-        if (indexDefinition.getSimilarityProperties().size() > 0) {
+        if (!indexDefinition.getSimilarityProperties().isEmpty()) {
             builder.otherSettings(ElasticIndexDefinition.ELASTIKNN, JsonData.of(true));
         }
 
@@ -246,7 +251,7 @@
                 builder.properties(FieldNames.SUGGEST,
                         b1 -> b1.nested(
                                 // TODO: evaluate https://www.elastic.co/guide/en/elasticsearch/reference/current/faster-prefix-queries.html
-                                b2 -> b2.properties("value",
+                                b2 -> b2.properties(SUGGEST_NESTED_VALUE,
                                         b3 -> b3.text(
                                                 b4 -> b4.analyzer("oak_analyzer")
                                         )
@@ -258,10 +263,10 @@
             for (PropertyDefinition pd : indexDefinition.getDynamicBoostProperties()) {
                 builder.properties(pd.nodeName,
                         b1 -> b1.nested(
-                                b2 -> b2.properties("value",
+                                b2 -> b2.properties(DYNAMIC_BOOST_NESTED_VALUE,
                                                 b3 -> b3.text(
                                                         b4 -> b4.analyzer("oak_analyzer")))
-                                        .properties("boost",
+                                        .properties(DYNAMIC_BOOST_NESTED_BOOST,
                                                 b3 -> b3.double_(f -> f)
                                         )
                         )
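
The helper now describes both halves of the index lifecycle with the Java client's builders: creation uses bulk-load friendly settings (refresh disabled, zero replicas), and enableIndexRequest later restores replicas and a refresh interval. A condensed sketch of the two settings payloads (index name and replica count are illustrative):

    import co.elastic.clients.elasticsearch._types.Time;
    import co.elastic.clients.elasticsearch.indices.CreateIndexRequest;
    import co.elastic.clients.elasticsearch.indices.IndexSettings;
    import co.elastic.clients.elasticsearch.indices.PutIndicesSettingsRequest;

    class IndexSettingsSketch {
        // settings applied at creation time to speed up the initial bulk load
        static CreateIndexRequest createForBulkLoad(String remoteIndexName) {
            return CreateIndexRequest.of(c -> c
                    .index(remoteIndexName)
                    .settings(s -> s
                            .refreshInterval(Time.of(t -> t.time("-1"))) // no background refreshes
                            .numberOfReplicas("0")));                    // replicas added when the index is enabled
        }

        // settings applied once the index is ready to serve near real time queries
        static PutIndicesSettingsRequest enable(String remoteIndexName, int numberOfReplicas) {
            IndexSettings settings = IndexSettings.of(s -> s
                    .numberOfReplicas(Integer.toString(numberOfReplicas))
                    .refreshInterval(Time.of(t -> t.time("1s"))));
            return PutIndicesSettingsRequest.of(p -> p
                    .index(remoteIndexName)
                    .settings(settings));
        }
    }
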
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriter.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriter.java
index 3ae8ec3..3bbd7d3 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriter.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriter.java
@@ -17,11 +17,14 @@
 package org.apache.jackrabbit.oak.plugins.index.elastic.index;
 
 import co.elastic.clients.elasticsearch._types.AcknowledgedResponseBase;
+import co.elastic.clients.elasticsearch._types.ElasticsearchException;
 import co.elastic.clients.elasticsearch.indices.CreateIndexRequest;
 import co.elastic.clients.elasticsearch.indices.CreateIndexResponse;
 import co.elastic.clients.elasticsearch.indices.DeleteIndexResponse;
 import co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesClient;
 import co.elastic.clients.elasticsearch.indices.GetAliasResponse;
+import co.elastic.clients.elasticsearch.indices.PutIndicesSettingsRequest;
+import co.elastic.clients.elasticsearch.indices.PutIndicesSettingsResponse;
 import co.elastic.clients.elasticsearch.indices.UpdateAliasesRequest;
 import co.elastic.clients.elasticsearch.indices.UpdateAliasesResponse;
 import co.elastic.clients.json.JsonpUtils;
@@ -38,15 +41,6 @@
 import org.apache.jackrabbit.oak.plugins.index.search.spi.editor.FulltextIndexWriter;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
-import org.elasticsearch.ElasticsearchStatusException;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
-import org.elasticsearch.client.IndicesClient;
-import org.elasticsearch.client.RequestOptions;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.xcontent.XContentType;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.TestOnly;
 import org.slf4j.Logger;
@@ -57,18 +51,13 @@
 import java.util.Set;
 import java.util.UUID;
 
-import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
-import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
-
 class ElasticIndexWriter implements FulltextIndexWriter<ElasticDocument> {
     private static final Logger LOG = LoggerFactory.getLogger(ElasticIndexWriter.class);
 
     private final ElasticIndexTracker indexTracker;
     private final ElasticConnection elasticConnection;
     private final ElasticIndexDefinition indexDefinition;
-
     private final ElasticBulkProcessorHandler bulkProcessorHandler;
-
     private final boolean reindex;
     private final String indexName;
 
@@ -87,7 +76,7 @@
         // old index until the new one gets enabled) during incremental reindexing
         if (this.reindex) {
             try {
-                long seed = UUID.randomUUID().getMostSignificantBits();
+                long seed = indexDefinition.indexNameSeed == 0L ? UUID.randomUUID().getMostSignificantBits() : indexDefinition.indexNameSeed;
                 // merge gets called on node store later in the indexing flow
                 definitionBuilder.setProperty(ElasticIndexDefinition.PROP_INDEX_NAME_SEED, seed);
                 // let's store the current mapping version in the index definition
@@ -134,16 +123,12 @@
 
     @Override
     public void updateDocument(String path, ElasticDocument doc) throws IOException {
-        IndexRequest request = new IndexRequest(indexName)
-                .id(ElasticIndexUtils.idFromPath(path))
-                .source(doc.build(), XContentType.JSON);
-        bulkProcessorHandler.add(request);
+        bulkProcessorHandler.update(ElasticIndexUtils.idFromPath(path), doc);
     }
 
     @Override
     public void deleteDocuments(String path) throws IOException {
-        DeleteRequest request = new DeleteRequest(indexName).id(ElasticIndexUtils.idFromPath(path));
-        bulkProcessorHandler.add(request);
+        bulkProcessorHandler.delete(ElasticIndexUtils.idFromPath(path));
     }
 
     @Override
@@ -196,12 +181,12 @@
             final CreateIndexResponse response = esClient.create(request);
             LOG.info("Created index {}. Response acknowledged: {}", indexName, response.acknowledged());
             checkResponseAcknowledgement(response, "Create index call not acknowledged for index " + indexName);
-        } catch (ElasticsearchStatusException ese) {
+        } catch (ElasticsearchException ese) {
             // We already check index existence as the first thing in this method; if we get here it means we have run into
             // a conflict (eg: multiple cluster nodes provision concurrently).
             // Elasticsearch does not have a CREATE IF NOT EXIST, need to inspect exception
             // https://github.com/elastic/elasticsearch/issues/19862
-            if (ese.status().getStatus() == 400 && ese.getDetailedMessage().contains("resource_already_exists_exception")) {
+            if (ese.status() == 400 && ese.getMessage().contains("resource_already_exists_exception")) {
                 LOG.warn("Index {} already exists. Ignoring error", indexName);
             } else {
                 throw ese;
@@ -216,14 +201,12 @@
             throw new IllegalStateException("cannot enable an index that does not exist");
         }
 
-        UpdateSettingsRequest request = ElasticIndexHelper.enableIndexRequest(indexName, indexDefinition);
+        PutIndicesSettingsRequest request = ElasticIndexHelper.enableIndexRequest(indexName, indexDefinition);
         if (LOG.isDebugEnabled()) {
-            final String requestMsg = Strings.toString(request.toXContent(jsonBuilder(), EMPTY_PARAMS));
-            LOG.debug("Updating Index Settings with request {}", requestMsg);
+            LOG.debug("Updating Index Settings with request {}", request);
         }
-        IndicesClient oldClient = elasticConnection.getOldClient().indices();
-        AcknowledgedResponse response = oldClient.putSettings(request, RequestOptions.DEFAULT);
-        LOG.info("Updated settings for index {}. Response acknowledged: {}", indexName, response.isAcknowledged());
+        PutIndicesSettingsResponse response = client.putSettings(request);
+        LOG.info("Updated settings for index {}. Response acknowledged: {}", indexName, response.acknowledged());
         checkResponseAcknowledgement(response, "Update index settings call not acknowledged for index " + indexName);
 
         // update the alias
@@ -246,12 +229,6 @@
         deleteOldIndices(client, aliasResponse.result().keySet());
     }
 
-    private void checkResponseAcknowledgement(AcknowledgedResponse response, String exceptionMessage) {
-        if (!response.isAcknowledged()) {
-            throw new IllegalStateException(exceptionMessage);
-        }
-    }
-
     private void checkResponseAcknowledgement(AcknowledgedResponseBase response, String exceptionMessage) {
         if (!response.acknowledged()) {
             throw new IllegalStateException(exceptionMessage);
@@ -265,7 +242,7 @@
     }
 
     private void deleteOldIndices(ElasticsearchIndicesClient indicesClient, Set<String> indices) throws IOException {
-        if (indices.size() == 0)
+        if (indices.isEmpty())
             return;
         DeleteIndexResponse deleteIndexResponse = indicesClient.delete(db -> db.index(new ArrayList<>(indices)));
         checkResponseAcknowledgement(deleteIndexResponse, "Delete index call not acknowledged for indices " + indices);
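
Since Elasticsearch has no CREATE IF NOT EXISTS, the writer checks existence first and then treats a resource_already_exists_exception conflict as benign. A minimal sketch of that flow; the existence check via the indices exists API is illustrative, while the status-code handling mirrors the hunk above:

    import co.elastic.clients.elasticsearch._types.ElasticsearchException;
    import co.elastic.clients.elasticsearch.indices.ElasticsearchIndicesClient;

    import java.io.IOException;

    class CreateIfAbsentSketch {
        static void createIfAbsent(ElasticsearchIndicesClient indices, String indexName) throws IOException {
            if (indices.exists(e -> e.index(indexName)).value()) {
                return; // already provisioned, nothing to do
            }
            try {
                indices.create(c -> c.index(indexName));
            } catch (ElasticsearchException e) {
                // another cluster node may have provisioned the same index concurrently
                if (e.status() == 400 && e.getMessage().contains("resource_already_exists_exception")) {
                    return; // benign race, ignore
                }
                throw e;
            }
        }
    }
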
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/ElasticRequestHandler.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/ElasticRequestHandler.java
index 3ec9e51..e4cd77b 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/ElasticRequestHandler.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/ElasticRequestHandler.java
@@ -63,6 +63,7 @@
 import org.apache.jackrabbit.oak.api.Type;
 import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.index.IndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticPropertyDefinition;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.async.facets.ElasticFacetProvider;
@@ -150,70 +151,69 @@
     }
 
     public Query baseQuery() {
-        return Query.of(fn -> {
-                    fn.bool(fnb -> {
+        return Query.of(q -> q.bool(baseQueryBuilder().build()));
+    }
 
-                        FullTextExpression ft = filter.getFullTextConstraint();
+    public BoolQuery.Builder baseQueryBuilder() {
+        BoolQuery.Builder bqb = new BoolQuery.Builder();
+        FullTextExpression ft = filter.getFullTextConstraint();
 
-                        if (ft != null) {
-                            fnb.must(fullTextQuery(ft, planResult));
-                        }
+        if (ft != null) {
+            bqb.must(fullTextQuery(ft, planResult));
+        }
 
-                        if (propertyRestrictionQuery != null) {
-                            if (propertyRestrictionQuery.startsWith("mlt?")) {
-                                List<PropertyDefinition> sp = elasticIndexDefinition.getSimilarityProperties();
-                                String mltQueryString = propertyRestrictionQuery.substring("mlt?".length());
-                                Map<String, String> mltParams = MoreLikeThisHelperUtil.getParamMapFromMltQuery(mltQueryString);
-                                String queryNodePath = mltParams.get(MoreLikeThisHelperUtil.MLT_STREAM_BODY);
+        if (propertyRestrictionQuery != null) {
+            if (propertyRestrictionQuery.startsWith("mlt?")) {
+                List<PropertyDefinition> sp = elasticIndexDefinition.getSimilarityProperties();
+                String mltQueryString = propertyRestrictionQuery.substring("mlt?".length());
+                Map<String, String> mltParams = MoreLikeThisHelperUtil.getParamMapFromMltQuery(mltQueryString);
+                String queryNodePath = mltParams.get(MoreLikeThisHelperUtil.MLT_STREAM_BODY);
 
-                                if (queryNodePath == null) {
-                                    // TODO : See if we might want to support like Text here (passed as null in
-                                    // above constructors)
-                                    // IT is not supported in our lucene implementation.
-                                    throw new IllegalArgumentException(
-                                            "Missing required field stream.body in MLT query: " + mltQueryString);
-                                }
-                                if (sp.isEmpty()) {
-                                    // SimilarityImpl in oak-core sets property restriction for sim search and the
-                                    // query is something like
-                                    // mlt?mlt.fl=:path&mlt.mindf=0&stream.body=<path> . We need parse this query
-                                    // string and turn into a query
-                                    // elastic can understand.
-                                    fnb.must(m -> m.moreLikeThis(mltQuery(mltParams)));
-                                } else {
-                                    fnb.must(m -> m.bool(similarityQuery(queryNodePath, sp)));
-                                }
-
-                                // Add should clause to improve relevance using similarity tags only when similarity is
-                                // enabled and there is at least one similarity tag property
-                                if (elasticIndexDefinition.areSimilarityTagsEnabled() &&
-                                        !elasticIndexDefinition.getSimilarityTagsProperties().isEmpty()) {
-                                    // add should clause to improve relevance using similarity tags
-                                    fnb.should(s -> s
-                                            .moreLikeThis(m -> m
-                                                    .fields(ElasticIndexDefinition.SIMILARITY_TAGS)
-                                                    .like(l -> l.document(d -> d.id(ElasticIndexUtils.idFromPath(queryNodePath))))
-                                                    .minTermFreq(1)
-                                                    .minDocFreq(1)
-                                                    .boost(elasticIndexDefinition.getSimilarityTagsBoost())
-                                            )
-                                    );
-                                }
-
-                            } else {
-                                fnb.must(m -> m.queryString(qs -> qs.query(propertyRestrictionQuery)));
-                            }
-
-                        } else if (planResult.evaluateNonFullTextConstraints()) {
-                            for (Query constraint : nonFullTextConstraints(indexPlan, planResult)) {
-                                fnb.filter(constraint);
-                            }
-                        }
-                        return fnb;
-                    });
-                    return fn;
+                if (queryNodePath == null) {
+                    // TODO : See if we might want to support like Text here (passed as null in
+                    // above constructors)
+                    // It is not supported in our Lucene implementation.
+                    throw new IllegalArgumentException(
+                            "Missing required field stream.body in MLT query: " + mltQueryString);
                 }
-        );
+                if (sp.isEmpty()) {
+                    // SimilarityImpl in oak-core sets property restriction for sim search and the
+                    // query is something like
+                    // mlt?mlt.fl=:path&mlt.mindf=0&stream.body=<path> . We need to parse this query
+                    // string and turn it into a query
+                    // Elasticsearch can understand.
+                    bqb.must(m -> m.moreLikeThis(mltQuery(mltParams)));
+                } else {
+                    bqb.must(m -> m.bool(similarityQuery(queryNodePath, sp)));
+                }
+
+                // Add should clause to improve relevance using similarity tags only when similarity is
+                // enabled and there is at least one similarity tag property
+                if (elasticIndexDefinition.areSimilarityTagsEnabled() &&
+                        !elasticIndexDefinition.getSimilarityTagsProperties().isEmpty()) {
+                    // add should clause to improve relevance using similarity tags
+                    bqb.should(s -> s
+                            .moreLikeThis(m -> m
+                                    .fields(ElasticIndexDefinition.SIMILARITY_TAGS)
+                                    .like(l -> l.document(d -> d.id(ElasticIndexUtils.idFromPath(queryNodePath))))
+                                    .minTermFreq(1)
+                                    .minDocFreq(1)
+                                    .boost(elasticIndexDefinition.getSimilarityTagsBoost())
+                            )
+                    );
+                }
+
+            } else {
+                bqb.must(m -> m.queryString(qs -> qs.query(propertyRestrictionQuery)));
+            }
+
+        } else if (planResult.evaluateNonFullTextConstraints()) {
+            for (Query constraint : nonFullTextConstraints(indexPlan, planResult)) {
+                bqb.filter(constraint);
+            }
+        }
+
+        return bqb;
     }
 
     public @NotNull List<SortOptions> baseSorts() {
@@ -271,10 +271,10 @@
         return propertyRestrictionQuery != null && propertyRestrictionQuery.startsWith(SUGGEST_PREFIX);
     }
 
-    public ElasticFacetProvider getAsyncFacetProvider(ElasticResponseHandler responseHandler) {
+    public ElasticFacetProvider getAsyncFacetProvider(ElasticConnection connection, ElasticResponseHandler responseHandler) {
         return requiresFacets()
-                ? ElasticFacetProvider.getProvider(planResult.indexDefinition.getSecureFacetConfiguration(), this,
-                        responseHandler, filter::isAccessible)
+                ? ElasticFacetProvider.getProvider(planResult.indexDefinition.getSecureFacetConfiguration(), connection,
+                        elasticIndexDefinition, this, responseHandler, filter::isAccessible)
                 : null;
     }
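
Exposing baseQueryBuilder() (rather than only the fully built Query) lets callers such as the statistical facet provider, which now receives the connection and index definition, layer additional clauses or aggregations on top of the base bool query. A minimal sketch of that kind of composition (index, field and aggregation names are illustrative):

    import co.elastic.clients.elasticsearch._types.query_dsl.BoolQuery;
    import co.elastic.clients.elasticsearch.core.SearchRequest;

    class BaseQueryReuseSketch {
        // bqb would come from ElasticRequestHandler#baseQueryBuilder()
        static SearchRequest facetSearch(BoolQuery.Builder bqb, String indexName) {
            return SearchRequest.of(s -> s
                    .index(indexName)
                    .query(q -> q.bool(bqb.build()))
                    .size(0) // facets only, no hits needed
                    .aggregations("tags_facet", a -> a.terms(t -> t.field("tags"))));
        }
    }
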
 
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/ElasticResultRowAsyncIterator.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/ElasticResultRowAsyncIterator.java
index 05fc293..b350762 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/ElasticResultRowAsyncIterator.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/ElasticResultRowAsyncIterator.java
@@ -95,7 +95,7 @@
         this.indexPlan = indexPlan;
         this.rowInclusionPredicate = rowInclusionPredicate;
         this.metricHandler = metricHandler;
-        this.elasticFacetProvider = elasticRequestHandler.getAsyncFacetProvider(elasticResponseHandler);
+        this.elasticFacetProvider = elasticRequestHandler.getAsyncFacetProvider(indexNode.getConnection(), elasticResponseHandler);
         this.elasticQueryScanner = initScanner();
     }
 
@@ -110,6 +110,7 @@
             try {
                 nextRow = queue.take();
             } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();  // restore interrupt status
                 throw new IllegalStateException("Error reading next result from Elastic", e);
             }
         }
@@ -155,6 +156,7 @@
                 queue.put(new FulltextResultRow(path, searchHit.score() != null ? searchHit.score() : 0.0,
                         elasticResponseHandler.excerpts(searchHit), elasticFacetProvider, null));
             } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();  // restore interrupt status
                 throw new IllegalStateException("Error producing results into the iterator queue", e);
             }
         }
@@ -165,6 +167,7 @@
         try {
             queue.put(POISON_PILL);
         } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();  // restore interrupt status
             throw new IllegalStateException("Error inserting poison pill into the iterator queue", e);
         }
     }
@@ -173,8 +176,8 @@
         List<ElasticResponseListener> listeners = new ArrayList<>();
         // TODO: we could avoid to register this listener when the client is interested in facets only. It would save space and time
         listeners.add(this);
-        if (elasticFacetProvider != null) {
-            listeners.add(elasticFacetProvider);
+        if (elasticFacetProvider instanceof ElasticResponseListener) {
+            listeners.add((ElasticResponseListener) elasticFacetProvider);
         }
 
         return new ElasticQueryScanner(listeners);
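
The queue hand-offs above now restore the thread's interrupt flag before rethrowing, so code further up the stack can still observe the interruption. The pattern in isolation (queue type and message are illustrative):

    import java.util.concurrent.BlockingQueue;

    class InterruptRestoreSketch {
        static <T> T takeOrFail(BlockingQueue<T> queue) {
            try {
                return queue.take();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status before wrapping
                throw new IllegalStateException("Interrupted while waiting for the next element", e);
            }
        }
    }
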
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticFacetProvider.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticFacetProvider.java
index 9b1c3a5..8b3058d 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticFacetProvider.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticFacetProvider.java
@@ -16,6 +16,8 @@
  */
 package org.apache.jackrabbit.oak.plugins.index.elastic.query.async.facets;
 
+import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
+import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.ElasticRequestHandler;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.ElasticResponseHandler;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.async.ElasticResponseListener;
@@ -25,13 +27,15 @@
 import java.util.function.Predicate;
 
 /**
- * Provider of facets through an {@link ElasticResponseListener}
+ * Provider of facets for Elasticsearch
  */
-public interface ElasticFacetProvider extends FulltextIndex.FacetProvider, ElasticResponseListener {
+public interface ElasticFacetProvider extends FulltextIndex.FacetProvider {
 
     /**
      * Returns the appropriate provider based on the {@link SecureFacetConfiguration}
      * @param facetConfiguration the {@link SecureFacetConfiguration} to extract facet options
+     * @param connection the {@link ElasticConnection} to perform requests
+     * @param indexDefinition the {@link ElasticIndexDefinition} to extract index options
      * @param requestHandler the {@link ElasticRequestHandler} to perform actions at request time
      * @param responseHandler the {@link ElasticResponseHandler} to decode responses
      * @param isAccessible a {@link Predicate} to check if a node is accessible
@@ -41,6 +45,8 @@
      */
     static ElasticFacetProvider getProvider(
             SecureFacetConfiguration facetConfiguration,
+            ElasticConnection connection,
+            ElasticIndexDefinition indexDefinition,
             ElasticRequestHandler requestHandler,
             ElasticResponseHandler responseHandler,
             Predicate<String> isAccessible
@@ -51,9 +57,9 @@
                 facetProvider = new ElasticInsecureFacetAsyncProvider();
                 break;
             case STATISTICAL:
-                facetProvider = new ElasticStatisticalFacetAsyncProvider(
-                        requestHandler, responseHandler, isAccessible,
-                        facetConfiguration.getRandomSeed(), facetConfiguration.getStatisticalFacetSampleSize()
+                facetProvider = new ElasticStatisticalFacetAsyncProvider(connection, indexDefinition,
+                        requestHandler, responseHandler, isAccessible, facetConfiguration.getRandomSeed(),
+                        facetConfiguration.getStatisticalFacetSampleSize()
                 );
                 break;
             case SECURE:
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticInsecureFacetAsyncProvider.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticInsecureFacetAsyncProvider.java
index 50f1bd1..ef207e4 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticInsecureFacetAsyncProvider.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticInsecureFacetAsyncProvider.java
@@ -45,8 +45,12 @@
     public List<FulltextIndex.Facet> getFacets(int numberOfFacets, String columnName) {
         LOG.trace("Requested facets for {} - Latch count: {}", columnName, latch.getCount());
         try {
-            latch.await(15, TimeUnit.SECONDS);
+            boolean completed = latch.await(15, TimeUnit.SECONDS);
+            if (!completed) {
+                throw new IllegalStateException("Timed out while waiting for facets");
+            }
         } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();  // restore interrupt status
             throw new IllegalStateException("Error while waiting for facets", e);
         }
         LOG.trace("Reading facets for {} from aggregations {}", columnName, aggregations);
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticSecureFacetAsyncProvider.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticSecureFacetAsyncProvider.java
index 2cc1b3e..a02a6af 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticSecureFacetAsyncProvider.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticSecureFacetAsyncProvider.java
@@ -41,15 +41,14 @@
  */
 class ElasticSecureFacetAsyncProvider implements ElasticFacetProvider, ElasticResponseListener.SearchHitListener {
 
-    protected static final Logger LOG = LoggerFactory.getLogger(ElasticSecureFacetAsyncProvider.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticSecureFacetAsyncProvider.class);
 
-    protected final Set<String> facetFields;
-    private final Map<String, Map<String, Integer>> facetsMap = new ConcurrentHashMap<>();
-    private Map<String, List<FulltextIndex.Facet>> facets;
-    protected final ElasticResponseHandler elasticResponseHandler;
-    protected final Predicate<String> isAccessible;
-
+    private final Set<String> facetFields;
+    private final Map<String, Map<String, Integer>> accessibleFacetCounts = new ConcurrentHashMap<>();
+    private final ElasticResponseHandler elasticResponseHandler;
+    private final Predicate<String> isAccessible;
     private final CountDownLatch latch = new CountDownLatch(1);
+    private Map<String, List<FulltextIndex.Facet>> facets;
 
     ElasticSecureFacetAsyncProvider(
             ElasticRequestHandler elasticRequestHandler,
@@ -78,7 +77,7 @@
             for (String field: facetFields) {
                 JsonNode value = searchHit.source().get(field);
                 if (value != null) {
-                    facetsMap.compute(field, (column, facetValues) -> {
+                    accessibleFacetCounts.compute(field, (column, facetValues) -> {
                         if (facetValues == null) {
                             Map<String, Integer> values = new HashMap<>();
                             values.put(value.asText(), 1);
@@ -96,7 +95,7 @@
     @Override
     public void endData() {
         // create Facet objects, order by count (desc) and then by label (asc)
-        facets = facetsMap.entrySet()
+        facets = accessibleFacetCounts.entrySet()
                 .stream()
                 .collect(Collectors.toMap
                         (Map.Entry::getKey, x -> x.getValue().entrySet()
@@ -112,7 +111,7 @@
                                 .collect(Collectors.toList())
                         )
                 );
-        LOG.trace("End data {}", facetsMap);
+        LOG.trace("End data {}", facets);
         latch.countDown();
     }
 
@@ -120,8 +119,12 @@
     public List<FulltextIndex.Facet> getFacets(int numberOfFacets, String columnName) {
         LOG.trace("Requested facets for {} - Latch count: {}", columnName, latch.getCount());
         try {
-            latch.await(15, TimeUnit.SECONDS);
+            boolean completed = latch.await(15, TimeUnit.SECONDS);
+            if (!completed) {
+                throw new IllegalStateException("Timed out while waiting for facets");
+            }
         } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();  // restore interrupt status
             throw new IllegalStateException("Error while waiting for facets", e);
         }
         LOG.trace("Reading facets for {} from {}", columnName, facets);
diff --git a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticStatisticalFacetAsyncProvider.java b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticStatisticalFacetAsyncProvider.java
index 4611062..bc79385 100644
--- a/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticStatisticalFacetAsyncProvider.java
+++ b/oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/query/async/facets/ElasticStatisticalFacetAsyncProvider.java
@@ -18,145 +18,178 @@
 
 import co.elastic.clients.elasticsearch._types.aggregations.Aggregate;
 import co.elastic.clients.elasticsearch._types.aggregations.StringTermsBucket;
+import co.elastic.clients.elasticsearch._types.query_dsl.BoolQuery;
+import co.elastic.clients.elasticsearch._types.query_dsl.Query;
+import co.elastic.clients.elasticsearch.core.SearchRequest;
+import co.elastic.clients.elasticsearch.core.SearchResponse;
 import co.elastic.clients.elasticsearch.core.search.Hit;
+import co.elastic.clients.elasticsearch.core.search.SourceConfig;
+import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
+import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.ElasticRequestHandler;
 import org.apache.jackrabbit.oak.plugins.index.elastic.query.ElasticResponseHandler;
-import org.apache.jackrabbit.oak.plugins.index.elastic.query.async.ElasticResponseListener;
+import org.apache.jackrabbit.oak.plugins.index.search.FieldNames;
 import org.apache.jackrabbit.oak.plugins.index.search.spi.query.FulltextIndex;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
 /**
- * An {@link ElasticSecureFacetAsyncProvider} extension that subscribes also on Elastic Aggregation events.
+ * An {@link ElasticFacetProvider} implementation that performs random sampling on the result set to compute facets.
  * SearchHit events are sampled and then used to adjust facets coming from Aggregations in order to minimize
- * access checks. This provider could improve facets performance but only when the result set is quite big.
+ * access checks. This provider can improve facet performance, especially when the result set is large.
  */
-public class ElasticStatisticalFacetAsyncProvider extends ElasticSecureFacetAsyncProvider
-        implements ElasticResponseListener.AggregationListener {
+public class ElasticStatisticalFacetAsyncProvider implements ElasticFacetProvider {
 
-    private final int sampleSize;
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticStatisticalFacetAsyncProvider.class);
+
+    private final ElasticResponseHandler elasticResponseHandler;
+    private final Predicate<String> isAccessible;
+    private final Set<String> facetFields;
+    private final Map<String, List<FulltextIndex.Facet>> allFacets = new HashMap<>();
+    private final Map<String, Map<String, Integer>> accessibleFacetCounts = new ConcurrentHashMap<>();
+    private Map<String, List<FulltextIndex.Facet>> facets;
+    private final CountDownLatch latch = new CountDownLatch(1);
+    private int sampled;
     private long totalHits;
 
-    private final Random rGen;
-    private int sampled = 0;
-    private int seen = 0;
-    private long accessibleCount = 0;
+    ElasticStatisticalFacetAsyncProvider(ElasticConnection connection, ElasticIndexDefinition indexDefinition,
+                                         ElasticRequestHandler elasticRequestHandler, ElasticResponseHandler elasticResponseHandler,
+                                         Predicate<String> isAccessible, long randomSeed, int sampleSize) {
 
-    private final Map<String, List<FulltextIndex.Facet>> facetMap = new HashMap<>();
+        this.elasticResponseHandler = elasticResponseHandler;
+        this.isAccessible = isAccessible;
+        this.facetFields = elasticRequestHandler.facetFields().collect(Collectors.toSet());
 
-    private final CountDownLatch latch = new CountDownLatch(1);
+        BoolQuery.Builder builder = elasticRequestHandler.baseQueryBuilder();
+        builder.should(sb -> sb.functionScore(fsb ->
+                fsb.functions(f -> f.randomScore(rsb -> rsb.seed("" + randomSeed).field(FieldNames.PATH)))
+        ));
 
-    ElasticStatisticalFacetAsyncProvider(ElasticRequestHandler elasticRequestHandler,
-                                         ElasticResponseHandler elasticResponseHandler,
-                                         Predicate<String> isAccessible,
-                                         long randomSeed, int sampleSize) {
-        super(elasticRequestHandler, elasticResponseHandler, isAccessible);
-        this.sampleSize = sampleSize;
-        this.rGen = new Random(randomSeed);
-    }
+        SearchRequest searchRequest = SearchRequest.of(srb -> srb.index(indexDefinition.getIndexAlias())
+                .trackTotalHits(thb -> thb.enabled(true))
+                .source(SourceConfig.of(scf -> scf.filter(ff -> ff.includes(FieldNames.PATH).includes(new ArrayList<>(facetFields)))))
+                .query(Query.of(qb -> qb.bool(builder.build())))
+                .aggregations(elasticRequestHandler.aggregations())
+                .size(sampleSize)
+        );
 
-    @Override
-    public void startData(long totalHits) {
-        this.totalHits = totalHits;
-    }
+        LOG.trace("Kicking search query with random sampling {}", searchRequest);
+        CompletableFuture<SearchResponse<ObjectNode>> searchFuture =
+                connection.getAsyncClient().search(searchRequest, ObjectNode.class);
 
-    @Override
-    public void on(Hit<ObjectNode> searchHit) {
-        if (totalHits < sampleSize) {
-            super.on(searchHit);
-        } else {
-            if (sampleSize == sampled) {
-                return;
+        searchFuture.whenCompleteAsync((searchResponse, throwable) -> {
+            try {
+                if (throwable != null) {
+                    LOG.error("Error while retrieving sample documents", throwable);
+                } else {
+                    List<Hit<ObjectNode>> searchHits = searchResponse.hits().hits();
+                    this.sampled = searchHits != null ? searchHits.size() : 0;
+                    if (sampled > 0) {
+                        this.totalHits = searchResponse.hits().total().value();
+                        processAggregations(searchResponse.aggregations());
+                        searchResponse.hits().hits().forEach(this::processHit);
+                        computeStatisticalFacets();
+                    }
+                }
+            } finally {
+                latch.countDown();
             }
-            int r = rGen.nextInt((int) (totalHits - seen)) + 1;
-            seen++;
+        });
+    }
 
-            if (r <= sampleSize - sampled) {
-                sampled++;
-                final String path = elasticResponseHandler.getPath(searchHit);
-                if (path != null && isAccessible.test(path)) {
-                    accessibleCount++;
+    @Override
+    public List<FulltextIndex.Facet> getFacets(int numberOfFacets, String columnName) {
+        LOG.trace("Requested facets for {} - Latch count: {}", columnName, latch.getCount());
+        try {
+            boolean completed = latch.await(15, TimeUnit.SECONDS);
+            if (!completed) {
+                throw new IllegalStateException("Timed out while waiting for facets");
+            }
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();  // restore interrupt status
+            throw new IllegalStateException("Error while waiting for facets", e);
+        }
+        LOG.trace("Reading facets for {} from {}", columnName, facets);
+        return facets != null ? facets.get(FulltextIndex.parseFacetField(columnName)) : null;
+    }
+
+    private void processHit(Hit<ObjectNode> searchHit) {
+        final String path = elasticResponseHandler.getPath(searchHit);
+        if (path != null && isAccessible.test(path)) {
+            for (String field : facetFields) {
+                JsonNode value = searchHit.source().get(field);
+                if (value != null) {
+                    accessibleFacetCounts.compute(field, (column, facetValues) -> {
+                        if (facetValues == null) {
+                            Map<String, Integer> values = new HashMap<>();
+                            values.put(value.asText(), 1);
+                            return values;
+                        } else {
+                            facetValues.merge(value.asText(), 1, Integer::sum);
+                            return facetValues;
+                        }
+                    });
                 }
             }
         }
     }
 
-    @Override
-    public void on(Map<String, Aggregate> aggregations) {
+    private void processAggregations(Map<String, Aggregate> aggregations) {
         for (String field : facetFields) {
             List<StringTermsBucket> buckets = aggregations.get(field).sterms().buckets().array();
-            facetMap.put(field, buckets.stream()
+            allFacets.put(field, buckets.stream()
                     .map(b -> new FulltextIndex.Facet(b.key().stringValue(), (int) b.docCount()))
                     .collect(Collectors.toList())
             );
         }
     }
 
-    @Override
-    public void endData() {
-        if (totalHits < sampleSize) {
-            super.endData();
-        } else {
-            for (String facet: facetMap.keySet()) {
-                facetMap.compute(facet, (s, facets1) -> updateLabelAndValueIfRequired(facets1));
-            }
-            latch.countDown();
-        }
-    }
-
-    @Override
-    public List<FulltextIndex.Facet> getFacets(int numberOfFacets, String columnName) {
-        if (totalHits < sampleSize) {
-            return super.getFacets(numberOfFacets, columnName);
-        } else {
-            LOG.trace("Requested facets for {} - Latch count: {}", columnName, latch.getCount());
-            try {
-                latch.await(15, TimeUnit.SECONDS);
-            } catch (InterruptedException e) {
-                throw new IllegalStateException("Error while waiting for facets", e);
-            }
-            LOG.trace("Reading facets for {} from {}", columnName, facetMap);
-            return facetMap.get(FulltextIndex.parseFacetField(columnName));
-        }
-    }
-
-    private List<FulltextIndex.Facet> updateLabelAndValueIfRequired(List<FulltextIndex.Facet> labelAndValues) {
-        if (accessibleCount < sampleSize) {
-            int numZeros = 0;
-            List<FulltextIndex.Facet> newValues;
-            {
-                List<FulltextIndex.Facet> proportionedLVs = new LinkedList<>();
-                for (FulltextIndex.Facet labelAndValue : labelAndValues) {
-                    long count = labelAndValue.getCount() * accessibleCount / sampleSize;
-                    if (count == 0) {
-                        numZeros++;
-                    }
-                    proportionedLVs.add(new FulltextIndex.Facet(labelAndValue.getLabel(), Math.toIntExact(count)));
-                }
-                labelAndValues = proportionedLVs;
-            }
-            if (numZeros > 0) {
-                newValues = new LinkedList<>();
-                for (FulltextIndex.Facet lv : labelAndValues) {
-                    if (lv.getCount() > 0) {
-                        newValues.add(lv);
+    private void computeStatisticalFacets() {
+        for (String facetKey : allFacets.keySet()) {
+            if (accessibleFacetCounts.containsKey(facetKey)) {
+                Map<String, Integer> accessibleFacet = accessibleFacetCounts.get(facetKey);
+                List<FulltextIndex.Facet> uncheckedFacet = allFacets.get(facetKey);
+                for (FulltextIndex.Facet facet : uncheckedFacet) {
+                    if (accessibleFacet.containsKey(facet.getLabel())) {
+                        double sampleProportion = (double) accessibleFacet.get(facet.getLabel()) / sampled;
+                        // the returned count is the minimum of the unfiltered aggregation count and the estimate extrapolated from the accessible sample
+                        accessibleFacet.put(facet.getLabel(), Math.min(facet.getCount(), (int) (sampleProportion * totalHits)));
                     }
                 }
-            } else {
-                newValues = labelAndValues;
             }
-            return newValues;
-        } else {
-            return labelAndValues;
         }
+        // create Facet objects, order by count (desc) and then by label (asc)
+        facets = accessibleFacetCounts.entrySet()
+                .stream()
+                .collect(Collectors.toMap
+                        (Map.Entry::getKey, x -> x.getValue().entrySet()
+                                .stream()
+                                .map(e -> new FulltextIndex.Facet(e.getKey(), e.getValue()))
+                                .sorted((f1, f2) -> {
+                                    int f1Count = f1.getCount();
+                                    int f2Count = f2.getCount();
+                                    if (f1Count == f2Count) {
+                                        return f1.getLabel().compareTo(f2.getLabel());
+                                    } else return f2Count - f1Count;
+                                })
+                                .collect(Collectors.toList())
+                        )
+                );
+        LOG.trace("Statistical facets {}", facets);
     }
+
 }
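A worked sketch (illustrative names and numbers, not part of the patch) of the scaling rule the new computeStatisticalFacets() applies: each label's count in the accessible sample is extrapolated to the full result set and capped at the unfiltered aggregation count.

    final class StatisticalFacetScalingSketch {

        // accessibleInSample: sampled hits that carry the label and are readable by the user
        // sampled:            number of sampled hits
        // totalHits:          total hits reported by Elasticsearch for the query
        // aggregationCount:   raw (access-unchecked) count from the terms aggregation
        static int estimateAccessibleCount(int accessibleInSample, int sampled,
                                           long totalHits, int aggregationCount) {
            double sampleProportion = (double) accessibleInSample / sampled;
            // never report more than the aggregation itself returned
            return Math.min(aggregationCount, (int) (sampleProportion * totalHits));
        }

        public static void main(String[] args) {
            // 40 of 100 sampled hits are accessible and carry the label, 10_000 total hits,
            // aggregation reported 4_500 documents -> estimate min(4_500, 4_000) = 4_000
            System.out.println(estimateAccessibleCount(40, 100, 10_000, 4_500));
        }
    }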
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnectionRule.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnectionRule.java
index 9bb4cf0..d47210d 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnectionRule.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticConnectionRule.java
@@ -48,8 +48,16 @@
     private ElasticConnectionModel elasticConnectionModel;
 
     public ElasticConnectionRule(String elasticConnectionString) {
+        this(elasticConnectionString,
+                "elastic_test_" +
+                        RandomStringUtils.random(5, true, false).toLowerCase() +
+                        System.currentTimeMillis()
+        );
+    }
+
+    public ElasticConnectionRule(String elasticConnectionString, String indexPrefix) {
         this.elasticConnectionString = elasticConnectionString;
-        indexPrefix = "elastic_test_" + RandomStringUtils.random(5, true, false).toLowerCase();
+        this.indexPrefix = indexPrefix;
     }
 
     public ElasticsearchContainer elastic;
@@ -105,7 +113,7 @@
             elasticConnectionModel.elasticPort = port;
             elasticConnectionModel.elasticApiKey = apiKey;
             elasticConnectionModel.elasticApiSecret = apiSecret;
-            elasticConnectionModel.indexPrefix = indexPrefix + System.currentTimeMillis();
+            elasticConnectionModel.indexPrefix = indexPrefix;
         } catch (URISyntaxException e) {
             LOG.error("Provided elastic connection string is not valid ", e);
         }
@@ -118,7 +126,7 @@
         elasticConnectionModel.elasticPort = elastic.getMappedPort(ElasticConnection.DEFAULT_PORT);
         elasticConnectionModel.elasticApiKey = null;
         elasticConnectionModel.elasticApiSecret = null;
-        elasticConnectionModel.indexPrefix = indexPrefix + System.currentTimeMillis();
+        elasticConnectionModel.indexPrefix = indexPrefix;
     }
 
     private Map<String, String> getUriQueryParams(URI uri) {
@@ -145,7 +153,7 @@
             String apiSecret = queryParams.get("key_secret");
 
             return ElasticConnection.newBuilder()
-                    .withIndexPrefix(indexPrefix + System.currentTimeMillis())
+                    .withIndexPrefix(indexPrefix)
                     .withConnectionParameters(scheme, host, port)
                     .withApiKeys(apiKey, apiSecret)
                     .build();
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFacetTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFacetTest.java
index 6ede34a..9a42b9c 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFacetTest.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFacetTest.java
@@ -28,7 +28,7 @@
 
     @ClassRule
     public static final ElasticConnectionRule elasticRule =
-            new ElasticConnectionRule(ElasticTestUtils.ELASTIC_CONNECTION_STRING);
+            new ElasticConnectionRule(ElasticTestUtils.ELASTIC_CONNECTION_STRING, "elastic_test_");
 
     protected Repository createJcrRepository() {
         indexOptions = new ElasticIndexOptions();
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticPropertyIndexTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticPropertyIndexTest.java
index cef7ae6..64c21c7 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticPropertyIndexTest.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticPropertyIndexTest.java
@@ -36,7 +36,7 @@
 public class ElasticPropertyIndexTest extends ElasticAbstractQueryTest {
 
     @Test
-    public void testBulkProcessorEventsFlushLimit() throws Exception {
+    public void bulkProcessorEventsFlushLimit() throws Exception {
         setIndex("test1", createIndex("propa"));
 
         Tree test = root.getTree("/").addChild("test");
@@ -46,7 +46,7 @@
         root.commit();
 
         // 250 is the default flush limit for bulk processor, and we added just less than 250 nodes
-        // So once the index writer is closed , bulk Processor would be closed and all the 248 entries should be flushed.
+        // So once the index writer is closed, the bulk processor will be closed and all 248 entries should be flushed.
         // Make sure that the last entry is indexed correctly.
         String propaQuery = "select [jcr:path] from [nt:base] where [propa] = 'foo248'";
         assertEventually(() -> {
@@ -68,7 +68,7 @@
     }
 
     @Test
-    public void testBulkProcessorSizeFlushLimit() throws Exception {
+    public void bulkProcessorSizeFlushLimit() throws Exception {
         LogCustomizer customLogger = LogCustomizer
                 .forLogger(
                         "org.apache.jackrabbit.oak.plugins.index.elastic.index.ElasticBulkProcessorHandler")
@@ -105,8 +105,8 @@
                 assertQuery(propaQuery, List.of("/test/a" + docCountBreachingBulkSize));
             });
 
-            Assert.assertEquals(1, customLogger.getLogs().stream().filter(n -> n.contains("Bulk with id 2 processed with status OK in")).count());
-            Assert.assertEquals(0, customLogger.getLogs().stream().filter(n -> n.contains("Bulk with id 3 processed with status OK in")).count());
+            Assert.assertEquals(1, customLogger.getLogs().stream().filter(n -> n.contains("Bulk with id 2 processed in")).count());
+            Assert.assertEquals(0, customLogger.getLogs().stream().filter(n -> n.contains("Bulk with id 3 processed in")).count());
         } finally {
             customLogger.finished();
         }
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticReliabilityTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticReliabilityTest.java
index c457670..7c5f792 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticReliabilityTest.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticReliabilityTest.java
@@ -22,7 +22,11 @@
 import eu.rekawek.toxiproxy.model.toxic.LimitData;
 import org.apache.jackrabbit.oak.api.Tree;
 import org.junit.After;
+import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.contrib.java.lang.system.ProvideSystemProperty;
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
 import org.testcontainers.containers.ToxiproxyContainer;
 import org.testcontainers.utility.DockerImageName;
 
@@ -36,6 +40,15 @@
 
 public class ElasticReliabilityTest extends ElasticAbstractQueryTest {
 
+    // set cache expiration and refresh to low values to avoid cached results in tests
+    @Rule
+    public final ProvideSystemProperty updateSystemProperties
+            = new ProvideSystemProperty("oak.elastic.statsExpireSeconds", "5")
+            .and("oak.elastic.statsRefreshSeconds", "1");
+
+    @Rule
+    public final RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+
     private static final DockerImageName TOXIPROXY_IMAGE = DockerImageName.parse("ghcr.io/shopify/toxiproxy:2.6.0");
 
     private ToxiproxyContainer toxiproxy;
@@ -73,6 +86,7 @@
     }
 
     @Test
+    @Ignore("OAK-10558")
     public void connectionCutOnQuery() throws Exception {
         setIndex("test1", createIndex("propa", "propb"));
 
@@ -89,7 +103,7 @@
                 .limitData("CUT_CONNECTION_UPSTREAM", ToxicDirection.UPSTREAM, 0L);
 
         // elastic is down, query should not use it
-        assertThat(explain(query), not(containsString("elasticsearch:test1")));
+        assertEventually(() -> assertThat(explain(query), not(containsString("elasticsearch:test1"))));
 
         // result set should be correct anyway since traversal is enabled
         assertQuery(query, Arrays.asList("/test/a", "/test/b"));
@@ -98,7 +112,7 @@
         cutConnectionUpstream.remove();
 
         // result set should be the same as before but this time elastic should be used
-        assertThat(explain(query), containsString("elasticsearch:test1"));
+        assertEventually(() -> assertThat(explain(query), containsString("elasticsearch:test1")));
         assertQuery(query, Arrays.asList("/test/a", "/test/b"));
     }
 }
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticTestServer.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticTestServer.java
index f0cf5da..f0a2824 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticTestServer.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticTestServer.java
@@ -42,16 +42,16 @@
 public class ElasticTestServer implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(ElasticTestServer.class);
     private static final Map<String, String> PLUGIN_OFFICIAL_RELEASES_DIGEST_MAP = Map.of(
-            "7.17.3.0", "5e3b40bb72b2813f927be9bf6ecdf88668d89d2ef20c7ebafaa51ab8407fd179",
-            "7.17.6.0", "326893bb98ef1a0c569d9f4c4a9a073e53361924f990b17e87077985ce8a7478",
             "7.17.7.0", "4252eb55cc7775f1b889d624ac335abfa2e357931c40d0accb4d520144246b8b",
             "8.3.3.0", "14d3223456f4b9f00f86628ec8400cb46513935e618ae0f5d0d1088739ccc233",
-            "8.4.1.0", "56797a1bac6ceeaa36d2358f818b14633124d79c5e04630fa3544603d82eaa01",
             "8.4.2.0", "5ce81ad043816900a496ad5b3cce7de1d99547ebf92aa1f9856343e48580c71c",
             "8.4.3.0", "5c00d43cdd56c5c5d8e9032ad507acea482fb5ca9445861c5cc12ad63af66425",
             "8.5.3.0", "d4c13f68650f9df5ff8c74ec83abc2e416de9c45f991d459326e0e2baf7b0e3f",
             "8.7.0.0", "7aeac9b7ac4dea1ded3f8e477e26bcc7fe62e313edf6352f4bdf973c43d25819",
-            "8.7.1.0", "80c8d34334b0cf4def79835ea6dab78b59ba9ee54c8f5f3cba0bde53123d7820");
+            "8.7.1.0", "80c8d34334b0cf4def79835ea6dab78b59ba9ee54c8f5f3cba0bde53123d7820",
+            "8.10.4.0", "b2ae8faf1e272319594b4d47a72580fa4f61a5c11cbc8d3f13453fd34b153441",
+            "8.11.0.0", "8d4d80b850c4da4da6dfe2d675b2e2355d2014307f8bdc54cc1b34323c81c7ae",
+            "8.11.1.0", "a00a920d4bc29f0deacde7c2ef3d3f70692b00b62bf7fb82b0fe18eeb1dafee9");
 
     private static final ElasticTestServer SERVER = new ElasticTestServer();
     private static volatile ElasticsearchContainer CONTAINER;
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandlerTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandlerTest.java
index 1f9575b..eb37ce4 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandlerTest.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticBulkProcessorHandlerTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.jackrabbit.oak.plugins.index.elastic.index;
 
+import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.memory.MultiStringPropertyState;
@@ -23,6 +24,7 @@
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mock;
@@ -32,7 +34,7 @@
 import java.util.Collections;
 
 import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.when;
 
@@ -48,16 +50,27 @@
     private ElasticConnection elasticConnectionMock;
 
     @Mock
+    private ElasticsearchAsyncClient esAsyncClientMock;
+
+    @Mock
     private NodeBuilder definitionBuilder;
 
     @Mock
     private CommitInfo commitInfo;
 
+    private AutoCloseable closeable;
+
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
+        closeable = MockitoAnnotations.openMocks(this);
         when(indexDefinitionMock.getDefinitionNodeState()).thenReturn(definitionNodeStateMock);
         when(commitInfo.getInfo()).thenReturn(Collections.emptyMap());
+        when(elasticConnectionMock.getAsyncClient()).thenReturn(esAsyncClientMock);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        closeable.close();
     }
 
     @Test
diff --git a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriterTest.java b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriterTest.java
index a51eda1..8997b88 100644
--- a/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriterTest.java
+++ b/oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexWriterTest.java
@@ -19,9 +19,7 @@
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticConnection;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexDefinition;
 import org.apache.jackrabbit.oak.plugins.index.elastic.ElasticIndexTracker;
-import org.elasticsearch.action.DocWriteRequest;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
@@ -35,6 +33,8 @@
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.number.OrderingComparison.lessThan;
 import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -55,35 +55,41 @@
 
     private ElasticIndexWriter indexWriter;
 
+    private AutoCloseable closeable;
+
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
+        closeable = MockitoAnnotations.openMocks(this);
         when(indexDefinitionMock.getIndexAlias()).thenReturn("test-index");
         indexWriter = new ElasticIndexWriter(indexTrackerMock, elasticConnectionMock, indexDefinitionMock, bulkProcessorHandlerMock);
     }
 
+    @After
+    public void tearDown() throws Exception {
+        closeable.close();
+    }
+
     @Test
     public void singleUpdateDocument() throws IOException {
         indexWriter.updateDocument("/foo", new ElasticDocument("/foo"));
 
-        ArgumentCaptor<IndexRequest> acIndexRequest = ArgumentCaptor.forClass(IndexRequest.class);
-        verify(bulkProcessorHandlerMock).add(acIndexRequest.capture());
+        ArgumentCaptor<ElasticDocument> esDocumentCaptor = ArgumentCaptor.forClass(ElasticDocument.class);
+        ArgumentCaptor<String> idCaptor = ArgumentCaptor.forClass(String.class);
+        verify(bulkProcessorHandlerMock).update(idCaptor.capture(), esDocumentCaptor.capture());
 
-        IndexRequest request = acIndexRequest.getValue();
-        assertEquals("test-index", request.index());
-        assertEquals("/foo", request.id());
+        assertEquals("/foo", idCaptor.getValue());
+        assertEquals("/foo", esDocumentCaptor.getValue().path);
     }
 
     @Test
     public void singleDeleteDocument() throws IOException {
         indexWriter.deleteDocuments("/bar");
 
-        ArgumentCaptor<DeleteRequest> acDeleteRequest = ArgumentCaptor.forClass(DeleteRequest.class);
-        verify(bulkProcessorHandlerMock).add(acDeleteRequest.capture());
+        ArgumentCaptor<String> idCaptor = ArgumentCaptor.forClass(String.class);
+        verify(bulkProcessorHandlerMock).delete(idCaptor.capture());
 
-        DeleteRequest request = acDeleteRequest.getValue();
-        assertEquals("test-index", request.index());
-        assertEquals("/bar", request.id());
+        String id = idCaptor.getValue();
+        assertEquals("/bar", id);
     }
 
     @Test
@@ -93,8 +99,8 @@
         indexWriter.deleteDocuments("/foo");
         indexWriter.deleteDocuments("/bar");
 
-        ArgumentCaptor<DocWriteRequest<?>> request = ArgumentCaptor.forClass(DocWriteRequest.class);
-        verify(bulkProcessorHandlerMock, times(4)).add(request.capture());
+        verify(bulkProcessorHandlerMock, times(2)).update(anyString(), any(ElasticDocument.class));
+        verify(bulkProcessorHandlerMock, times(2)).delete(anyString());
     }
 
     @Test
@@ -103,12 +109,12 @@
 
         indexWriter.updateDocument(generatedPath, new ElasticDocument(generatedPath));
 
-        ArgumentCaptor<IndexRequest> acIndexRequest = ArgumentCaptor.forClass(IndexRequest.class);
-        verify(bulkProcessorHandlerMock).add(acIndexRequest.capture());
+        ArgumentCaptor<String> idCaptor = ArgumentCaptor.forClass(String.class);
+        verify(bulkProcessorHandlerMock).update(idCaptor.capture(), any(ElasticDocument.class));
 
-        IndexRequest request = acIndexRequest.getValue();
-        assertThat(request.id(), not(generatedPath));
-        assertThat(request.id().length(), lessThan(513));
+        String id = idCaptor.getValue();
+        assertThat(id, not(generatedPath));
+        assertThat(id.length(), lessThan(513));
     }
 
     @Test
diff --git a/oak-search/src/main/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndex.java b/oak-search/src/main/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndex.java
index 38dd82d..4b469f5 100644
--- a/oak-search/src/main/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndex.java
+++ b/oak-search/src/main/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndex.java
@@ -615,7 +615,26 @@
         }
     }
 
+    /**
+     * Get the facet name from a column name.
+     *
+     * This method silently assumes(!) that the column name starts with "rep:facet("
+     * and ends with ")".
+     *
+     * @param columnName the column name, e.g. "rep:facet(abc)"
+     * @return the facet name, e.g. "abc"
+     */
     public static String parseFacetField(String columnName) {
         return columnName.substring(QueryConstants.REP_FACET.length() + 1, columnName.length() - 1);
     }
+
+    /**
+     * Convert the facet name to a column name.
+     *
+     * @param facetFieldName the facet field name, e.g. "abc"
+     * @return the column name, e.g. "rep:facet(abc)"
+     */
+    public static String convertFacetFieldNameToColumnName(String facetFieldName) {
+        return QueryConstants.REP_FACET + "(" + facetFieldName + ")";
+    }
 }
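A small usage sketch for the two helpers added above; it assumes QueryConstants.REP_FACET resolves to "rep:facet", as the javadoc examples suggest (not part of the patch):

    import org.apache.jackrabbit.oak.plugins.index.search.spi.query.FulltextIndex;

    class FacetColumnNameSketch {
        public static void main(String[] args) {
            // field name -> column name, e.g. "jcr:title" -> "rep:facet(jcr:title)"
            String column = FulltextIndex.convertFacetFieldNameToColumnName("jcr:title");
            // column name -> field name, the inverse for well-formed column names
            String field = FulltextIndex.parseFacetField(column);
            System.out.println(column + " -> " + field);
        }
    }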
diff --git a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FacetCommonTest.java b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FacetCommonTest.java
index 09e9bea..92ef62c 100644
--- a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FacetCommonTest.java
+++ b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FacetCommonTest.java
@@ -32,16 +32,17 @@
 import javax.jcr.RepositoryException;
 import javax.jcr.query.Query;
 import javax.jcr.query.QueryResult;
+import javax.jcr.query.RowIterator;
 import javax.jcr.security.Privilege;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Random;
-import java.util.UUID;
 import java.util.stream.Collectors;
 
 import static org.apache.jackrabbit.commons.JcrUtils.getOrCreateByPath;
 import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.FACETS;
+import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.PROP_RANDOM_SEED;
 import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.PROP_REFRESH_DEFN;
 import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.PROP_SECURE_FACETS;
 import static org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants.PROP_SECURE_FACETS_VALUE_INSECURE;
@@ -67,14 +68,19 @@
     private final Map<String, Integer> actualLabelCount = new HashMap<>();
     private final Map<String, Integer> actualAclLabelCount = new HashMap<>();
     private final Map<String, Integer> actualAclPar1LabelCount = new HashMap<>();
+    private static final Random INDEX_SUFFIX_RANDOMIZER = new Random(7);
 
 
     @Before
     public void createIndex() throws RepositoryException {
-        String indexName = UUID.randomUUID().toString();
         IndexDefinitionBuilder builder = indexOptions.createIndex(indexOptions.createIndexDefinitionBuilder(), false);
         builder.noAsync().evaluatePathRestrictions();
         builder.getBuilderTree().setProperty("jcr:primaryType", "oak:QueryIndexDefinition", Type.NAME);
+        // Statistical facets in Elasticsearch use a random function with a fixed seed, but the results are not
+        // consistent when the index name changes. So we set the index name to a fixed value.
+        String indexName = "FacetCommonTestIndex" + INDEX_SUFFIX_RANDOMIZER.nextInt(1000);
+        builder.getBuilderTree().setProperty(PROP_RANDOM_SEED, 3000L, Type.LONG);
+        builder.getBuilderTree().setProperty("indexNameSeed", 300L, Type.LONG);
         IndexDefinitionBuilder.IndexRule indexRule = builder.indexRule(JcrConstants.NT_BASE);
         indexRule.property("cons").propertyIndex();
         indexRule.property("foo").propertyIndex().getBuilderTree().setProperty(FACET_PROP, true, Type.BOOLEAN);
@@ -173,18 +179,34 @@
 
         createDataset(NUM_LEAF_NODES_FOR_LARGE_DATASET);
 
-        assertEventually(() -> assertEquals("Unexpected number of facets", actualAclLabelCount.size(), getFacets().size()));
+        assertEventually(() -> {
+            Map<String, Integer> facets = getFacets();
+            assertEquals("Unexpected number of facets", actualAclLabelCount.size(), facets.size());
 
-        for (Map.Entry<String, Integer> facet : actualAclLabelCount.entrySet()) {
-            String facetLabel = facet.getKey();
-            assertEventually(() -> {
-                int facetCount = getFacets().get(facetLabel);
-                float ratio = ((float) facetCount) / facet.getValue();
-                assertTrue("Facet count for label: " + facetLabel + " is outside of 10% margin of error. " +
-                                "Expected: " + facet.getValue() + "; Got: " + facetCount + "; Ratio: " + ratio,
-                        Math.abs(ratio - 1) < 0.1);
-            });
-        }
+            for (Map.Entry<String, Integer> facet : actualAclLabelCount.entrySet()) {
+                String facetLabel = facet.getKey();
+                assertEventually(() -> {
+                    int facetCount = facets.get(facetLabel);
+                    float ratio = ((float) facetCount) / facet.getValue();
+                    assertTrue("Facet count for label: " + facetLabel + " is outside of 10% margin of error. " +
+                                    "Expected: " + facet.getValue() + "; Got: " + facetCount + "; Ratio: " + ratio,
+                            Math.abs(ratio - 1) < 0.1);
+                });
+            }
+
+            try {
+                // Verify that the query result is not affected by the facet sampling
+                int rowCounter = 0;
+                RowIterator rows = getQueryResult(null).getRows();
+                while (rows.hasNext()) {
+                    rows.nextRow();
+                    rowCounter++;
+                }
+                assertEquals("Unexpected number of rows", 3000, rowCounter);
+            } catch (RepositoryException e) {
+                throw new RuntimeException(e);
+            }
+        });
     }
 
     @Test
@@ -196,11 +218,14 @@
 
         createDataset(NUM_LEAF_NODES_FOR_SMALL_DATASET);
 
-        assertEventually(() -> assertEquals("Unexpected number of facets", actualAclLabelCount.size(), getFacets().size()));
+        assertEventually(() -> {
+            Map<String, Integer> facets = getFacets();
+            assertEquals("Unexpected number of facets", actualAclLabelCount.size(), facets.size());
 
-        // Since the hit count is less than sample size -> flow should have switched to secure facet count instead of statistical
-        // and thus the count should be exactly equal
-        assertEventually(() -> assertEquals(actualAclLabelCount, getFacets()));
+            // Since the hit count is less than the sample size, the flow should have switched to secure facet counting
+            // instead of statistical, and thus the counts should be exactly equal
+            assertEquals(actualAclLabelCount, facets);
+        });
     }
 
     @Test
@@ -242,19 +267,16 @@
         assertEventually(() -> {
             Map<String, Integer> facets = getFacets();
             assertEquals("Unexpected number of facets", actualAclLabelCount.size(), facets.size());
-        });
 
-        for (Map.Entry<String, Integer> facet : actualAclLabelCount.entrySet()) {
-
-            assertEventually(() -> {
+            for (Map.Entry<String, Integer> facet : actualAclLabelCount.entrySet()) {
                 String facetLabel = facet.getKey();
-                int facetCount = getFacets().get(facetLabel);
+                int facetCount = facets.get(facetLabel);
                 float ratio = ((float) facetCount) / facet.getValue();
                 assertTrue("Facet count for label: " + facetLabel + " is outside of 10% margin of error. " +
                                 "Expected: " + facet.getValue() + "; Got: " + facetCount + "; Ratio: " + ratio,
                         Math.abs(ratio - 1) < 0.1);
-            });
-        }
+            }
+        });
     }
 
     @Test
@@ -279,18 +301,16 @@
         assertEventually(() -> {
             Map<String, Integer> facets = getFacets();
             assertEquals("Unexpected number of facets", actualLabelCount.size(), facets.size());
-        });
 
-        for (Map.Entry<String, Integer> facet : actualLabelCount.entrySet()) {
-            assertEventually(() -> {
+            for (Map.Entry<String, Integer> facet : actualLabelCount.entrySet()) {
                 String facetLabel = facet.getKey();
-                int facetCount = getFacets().get(facetLabel);
+                int facetCount = facets.get(facetLabel);
                 float ratio = ((float) facetCount) / facet.getValue();
                 assertTrue("Facet count for label: " + facetLabel + " is outside of 5% margin of error. " +
                                 "Expected: " + facet.getValue() + "; Got: " + facetCount + "; Ratio: " + ratio,
                         Math.abs(ratio - 1) < 0.05);
-            });
-        }
+            }
+        });
     }
 
     private Map<String, Integer> getFacets() {
@@ -308,19 +328,7 @@
     }
 
     private Map<String, Integer> getFacets(String path) {
-        String pathCons = "";
-        if (path != null) {
-            pathCons = " AND ISDESCENDANTNODE('" + path + "')";
-        }
-        String query = "SELECT [rep:facet(foo)], [rep:facet(bar)], [rep:facet(baz)] FROM [nt:base] WHERE [cons] = 'val'" + pathCons;
-        Query q;
-        QueryResult queryResult;
-        try {
-            q = qm.createQuery(query, Query.JCR_SQL2);
-            queryResult = q.execute();
-        } catch (RepositoryException e) {
-            throw new RuntimeException(e);
-        }
+        QueryResult queryResult = getQueryResult(path);
         long start = LOG_PERF.start("Getting the Facet Results...");
         FacetResult facetResult = new FacetResult(queryResult);
         LOG_PERF.end(start, -1, "Facet Results fetched");
@@ -331,6 +339,23 @@
                 .collect(Collectors.toMap(FacetResult.Facet::getLabel, FacetResult.Facet::getCount));
     }
 
+    private QueryResult getQueryResult(String path) {
+        String pathCons = "";
+        if (path != null) {
+            pathCons = " AND ISDESCENDANTNODE('" + path + "')";
+        }
+        String query = "SELECT [jcr:path], [rep:facet(foo)], [rep:facet(bar)], [rep:facet(baz)] FROM [nt:base] WHERE [cons] = 'val'" + pathCons;
+        Query q;
+        QueryResult queryResult;
+        try {
+            q = qm.createQuery(query, Query.JCR_SQL2);
+            queryResult = q.execute();
+        } catch (RepositoryException e) {
+            throw new RuntimeException(e);
+        }
+        return queryResult;
+    }
+
     protected void assertEventually(Runnable r) {
         TestUtil.assertEventually(r, ((repositoryOptionsUtil.isAsync() ? repositoryOptionsUtil.defaultAsyncIndexingTimeInSeconds : 0) + 3000) * 5);
     }
diff --git a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndexTest.java b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndexTest.java
index 316081b..17dc399 100644
--- a/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndexTest.java
+++ b/oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/search/spi/query/FulltextIndexTest.java
@@ -59,6 +59,19 @@
         assertEquals("jcr:primaryType", field);
     }
 
+    @Test
+    public void testConvertParseFacetField() {
+        assertEquals("rep:facet(text)",
+                FulltextIndex.convertFacetFieldNameToColumnName(
+                        "text"));
+        assertEquals("rep:facet(jcr:title)",
+                FulltextIndex.convertFacetFieldNameToColumnName(
+                        "jcr:title"));
+        assertEquals("rep:facet(jcr:primaryType)",
+                FulltextIndex.convertFacetFieldNameToColumnName(
+                        "jcr:primaryType"));
+    }
+
     /**
      * Test that we can read the rows first, and then read the data from the rows.
      */
diff --git a/oak-segment-remote/pom.xml b/oak-segment-remote/pom.xml
index 04385cc..0be3392 100644
--- a/oak-segment-remote/pom.xml
+++ b/oak-segment-remote/pom.xml
@@ -151,9 +151,9 @@
             <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>com.github.kstyrc</groupId>
+            <groupId>com.github.codemonstur</groupId>
             <artifactId>embedded-redis</artifactId>
-            <version>0.6</version>
+            <version>1.0.0</version>
             <scope>test</scope>
         </dependency>
         <dependency>
diff --git a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/AbstractPersistentCacheTest.java b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/AbstractPersistentCacheTest.java
index 3ba8fce..529dc24 100644
--- a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/AbstractPersistentCacheTest.java
+++ b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/AbstractPersistentCacheTest.java
@@ -19,6 +19,7 @@
 
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.segment.spi.persistence.persistentcache.AbstractPersistentCache;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.*;
@@ -67,6 +68,7 @@
     }
 
     @Test
+    @Ignore("OAK-10543")
     public void writeAndReadManySegments() {
         final List<TestSegment> testSegments = new ArrayList<>(SEGMENTS);
         final List<Map<String, Buffer>> segmentsRead = new ArrayList<>(THREADS);
diff --git a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/PersistentRedisCacheTest.java b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/PersistentRedisCacheTest.java
index fe7c0a8..17804ed 100644
--- a/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/PersistentRedisCacheTest.java
+++ b/oak-segment-remote/src/test/java/org/apache/jackrabbit/oak/segment/remote/persistentcache/PersistentRedisCacheTest.java
@@ -20,14 +20,15 @@
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
 import org.junit.After;
+import org.junit.Assume;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
-import redis.embedded.RedisExecProvider;
 import redis.embedded.RedisServer;
+import redis.embedded.core.ExecutableProviderBuilder;
 
 import java.io.File;
+import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
@@ -41,27 +42,30 @@
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
 public class PersistentRedisCacheTest extends AbstractPersistentCacheTest {
 
+    private static final String REDIS_HOST = "127.0.0.1";
+
     private RedisServer redisServer;
     private IOMonitorAdapter ioMonitorAdapter;
 
     @Before
     public void setUp() throws Exception {
-        Path redisTempExecutable = RedisExecProvider.defaultProvider().get().toPath();
+        Path redisTempExecutable = new ExecutableProviderBuilder().addProvidedVersions().build().get().toPath();
         Path redisTargetExecutable = new File("target", redisTempExecutable.getFileName().toString()).toPath();
         Files.copy(redisTempExecutable, redisTargetExecutable, StandardCopyOption.REPLACE_EXISTING);
-        RedisExecProvider execProvider = mock(RedisExecProvider.class);
-        when(execProvider.get()).thenReturn(redisTargetExecutable.toFile());
-        redisServer = RedisServer.builder().setting("maxmemory 768mb").redisExecProvider(execProvider).build();
-        redisServer.start();
+        redisServer = RedisServer.newRedisServer().setting("maxmemory 768mb").bind(REDIS_HOST).executableProvider(redisTargetExecutable::toFile).build();
+        try {
+            redisServer.start();
+        } catch (IOException e) {
+            Assume.assumeNoException(e);
+        }
         int port = redisServer.ports().get(0);
         ioMonitorAdapter = mock(IOMonitorAdapter.class);
 
         persistentCache = new PersistentRedisCache(
-                "localhost",
+                REDIS_HOST,
                 port,
                 -1,
                 10000,
@@ -75,7 +79,7 @@
     }
 
     @After
-    public void tearDown() {
+    public void tearDown() throws IOException {
         redisServer.stop();
     }
 
@@ -99,10 +103,4 @@
 
         verify(ioMonitorAdapter, times(1)).afterSegmentRead(any(), eq(msb), eq(lsb), anyInt(), anyLong());
     }
-
-    @Test
-    @Ignore("OAK-10543")
-    public void writeAndReadManySegments() {
-        super.writeAndReadManySegments();
-    }
 }
\ No newline at end of file
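For readers unfamiliar with the embedded-redis builder API that the test now targets, here is a minimal sketch of the server lifecycle used in setUp() above. The builder calls, the ports() accessor and the Assume-based skip mirror the diff; the helper class and its name are illustrative only.

import java.io.IOException;

import org.junit.Assume;
import redis.embedded.RedisServer;

// Illustrative helper (not part of the patch): start an embedded Redis bound to the
// loopback address and skip the calling test when the environment cannot launch it.
final class EmbeddedRedisSupport {

    static RedisServer startOrSkip() throws IOException {
        RedisServer server = RedisServer.newRedisServer()
                .setting("maxmemory 768mb")   // same memory cap as the test above
                .bind("127.0.0.1")            // explicit loopback instead of "localhost"
                .build();
        try {
            server.start();
        } catch (IOException e) {
            // no usable redis binary for this platform: skip the test instead of failing it
            Assume.assumeNoException(e);
        }
        return server;
    }

    static int firstPort(RedisServer server) {
        return server.ports().get(0);         // the test hands this port to PersistentRedisCache
    }
}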
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
index d2a1adc..d314447 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreBackupImpl.java
@@ -38,6 +38,7 @@
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
@@ -86,15 +87,11 @@
                     bufferWriter,
                     backup.getBinariesInlineThreshold()
             );
-            ClassicCompactor compactor = new ClassicCompactor(
-                    backup.getReader(),
-                    writer,
-                    backup.getBlobStore(),
-                    GCNodeWriteMonitor.EMPTY
-            );
+            CompactionWriter compactionWriter = new CompactionWriter(backup.getReader(), backup.getBlobStore(), gen, writer);
+            ClassicCompactor compactor = new ClassicCompactor(compactionWriter, GCNodeWriteMonitor.EMPTY);
             SegmentNodeState head = backup.getHead();
-            SegmentNodeState after = compactor.compact(head, current, head, Canceller.newCanceller());
-            writer.flush();
+            SegmentNodeState after = compactor.compactUp(head, current, Canceller.newCanceller());
+            compactionWriter.flush();
 
             if (after != null) {
                 backup.getRevisions().setHead(head.getRecordId(), after.getRecordId());
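As a reading aid, here is a condensed sketch of the rewired backup compaction step above; the restore path in the next diff follows the same pattern. The CompactionWriter constructor, the compactUp call and the flush are taken from the diff, while the helper method and the assumption that current, gen and writer are available exactly as in the surrounding method are illustrative.

import java.io.IOException;

import org.apache.jackrabbit.oak.segment.ClassicCompactor;
import org.apache.jackrabbit.oak.segment.SegmentNodeState;
import org.apache.jackrabbit.oak.segment.SegmentWriter;
import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
import org.apache.jackrabbit.oak.spi.state.NodeState;

// Illustrative helper: compact the diff between the source state ('current') and the backup
// head on top of the backup head, going through the new CompactionWriter indirection.
final class BackupCompactionSketch {

    static SegmentNodeState compactForBackup(FileStore backup, NodeState current,
                                             GCGeneration gen, SegmentWriter writer) throws IOException {
        CompactionWriter compactionWriter =
                new CompactionWriter(backup.getReader(), backup.getBlobStore(), gen, writer);
        ClassicCompactor compactor = new ClassicCompactor(compactionWriter, GCNodeWriteMonitor.EMPTY);

        SegmentNodeState head = backup.getHead();
        // compactUp(before, after, canceller) rebases the (before -> after) diff onto 'before',
        // here the existing backup head; flushing now goes through the CompactionWriter.
        SegmentNodeState after = compactor.compactUp(head, current, Canceller.newCanceller());
        compactionWriter.flush();
        return after;   // null means compaction was cancelled, as in the previous code
    }
}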
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
index c4b04ba..cbc0c84 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/backup/impl/FileStoreRestoreImpl.java
@@ -19,14 +19,12 @@
 
 package org.apache.jackrabbit.oak.backup.impl;
 
-import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.defaultGCOptions;
 import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 
 import java.io.File;
 import java.io.IOException;
 
 import org.apache.jackrabbit.guava.common.base.Stopwatch;
-import org.apache.jackrabbit.guava.common.base.Suppliers;
 import org.apache.jackrabbit.oak.backup.FileStoreRestore;
 import org.apache.jackrabbit.oak.segment.DefaultSegmentWriter;
 import org.apache.jackrabbit.oak.segment.ClassicCompactor;
@@ -34,9 +32,9 @@
 import org.apache.jackrabbit.oak.segment.SegmentNodeState;
 import org.apache.jackrabbit.oak.segment.SegmentWriter;
 import org.apache.jackrabbit.oak.segment.WriterCacheManager;
-import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
@@ -82,15 +80,10 @@
                     bufferWriter,
                     store.getBinariesInlineThreshold()
             );
-            SegmentGCOptions gcOptions = defaultGCOptions().setOffline();
-            ClassicCompactor compactor = new ClassicCompactor(
-                    store.getReader(),
-                    writer,
-                    store.getBlobStore(),
-                    GCNodeWriteMonitor.EMPTY
-            );
-            SegmentNodeState after = compactor.compact(current, head, current, Canceller.newCanceller());
-            writer.flush();
+            CompactionWriter compactionWriter = new CompactionWriter(store.getReader(), store.getBlobStore(), gen, writer);
+            ClassicCompactor compactor = new ClassicCompactor(compactionWriter, GCNodeWriteMonitor.EMPTY);
+            SegmentNodeState after = compactor.compactUp(current, head, Canceller.newCanceller());
+            compactionWriter.flush();
             store.getRevisions().setHead(current.getRecordId(), after.getRecordId());
         } finally {
             restore.close();
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
index e9d63ce..2b24d14 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CheckpointCompactor.java
@@ -19,6 +19,7 @@
 package org.apache.jackrabbit.oak.segment;
 
 import static java.util.Objects.requireNonNull;
+import static org.apache.jackrabbit.guava.common.base.Preconditions.checkState;
 import static org.apache.jackrabbit.guava.common.collect.Lists.newArrayList;
 import static org.apache.jackrabbit.guava.common.collect.Maps.newHashMap;
 import static org.apache.jackrabbit.guava.common.collect.Maps.newLinkedHashMap;
@@ -26,10 +27,12 @@
 import static org.apache.jackrabbit.oak.commons.PathUtils.getName;
 import static org.apache.jackrabbit.oak.commons.PathUtils.getParentPath;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
-import static org.apache.jackrabbit.oak.segment.CompactorUtils.getStableIdBytes;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -37,9 +40,10 @@
 
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryChildNodeEntry;
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
-import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff;
@@ -59,124 +63,132 @@
  *     the same checkpoint or root state occur again in a later compaction retry cycle.</li>
  * </ul>
  */
-public class CheckpointCompactor implements Compactor {
-    @NotNull
-    protected final GCMonitor gcListener;
+public class CheckpointCompactor extends Compactor {
+    protected final @NotNull GCMonitor gcListener;
 
-    @NotNull
-    private final Map<NodeState, NodeState> cpCache = newHashMap();
+    private final @NotNull Map<NodeState, CompactedNodeState> cpCache = new HashMap<>();
 
-    @NotNull
-    protected final ClassicCompactor compactor;
-
-    @NotNull
-    private final NodeWriter nodeWriter;
-
-    private interface NodeWriter {
-        @NotNull
-        SegmentNodeState writeNode(@NotNull NodeState node, @Nullable Buffer stableId) throws IOException;
-    }
+    protected final @NotNull ClassicCompactor compactor;
 
     /**
      * Create a new instance based on the passed arguments.
-     * @param gcListener listener receiving notifications about the garbage collection process
-     * @param reader     segment reader used to read from the segments
-     * @param writer     segment writer used to serialise to segments
-     * @param blobStore  the blob store or {@code null} if none
-     * @param compactionMonitor   notification call back for each compacted nodes,
-     *                            properties, and binaries
+     *
+     * @param gcListener        listener receiving notifications about the garbage collection process
+     * @param writer            segment writer used to serialise to segments
+     * @param compactionMonitor notification callback for each compacted node,
+     *                          property, and binary
      */
     public CheckpointCompactor(
             @NotNull GCMonitor gcListener,
-            @NotNull SegmentReader reader,
-            @NotNull SegmentWriter writer,
-            @Nullable BlobStore blobStore,
+            @NotNull CompactionWriter writer,
             @NotNull GCNodeWriteMonitor compactionMonitor) {
         this.gcListener = gcListener;
-        this.compactor = new ClassicCompactor(reader, writer, blobStore, compactionMonitor);
-        this.nodeWriter = (node, stableId) -> {
-            RecordId nodeId = writer.writeNode(node, stableId);
-            return new SegmentNodeState(reader, writer, blobStore, nodeId);
-        };
+        this.compactor = new ClassicCompactor(writer, compactionMonitor);
     }
 
-    /**
-     * Compact {@code uncompacted} on top of an optional {@code base}.
-     * @param base         the base state to compact against
-     * @param uncompacted  the uncompacted state to compact
-     * @param onto         the state onto which to compact the change between {@code base} and
-     *                     {@code uncompacted}
-     * @return  compacted clone of {@code uncompacted} or {@code null} if cancelled.
-     * @throws IOException
-     */
     @Override
-    @Nullable
-    public SegmentNodeState compact(
-        @NotNull NodeState base,
-        @NotNull NodeState uncompacted,
-        @NotNull NodeState onto,
-        Canceller canceller
+    public @Nullable CompactedNodeState compactDown(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
     ) throws IOException {
-        // Collect a chronologically ordered list of roots for the uncompacted
-        // state. This list consists of all checkpoints followed by the root.
-        LinkedHashMap<String, NodeState> uncompactedRoots = collectRoots(base, uncompacted);
+        Iterator<Entry<String, NodeState>> iterator = collectRoots(before, after).entrySet().iterator();
+        Entry<String, NodeState> entry = iterator.next();
+        String path = entry.getKey();
 
-        // Compact the list of uncompacted roots to a list of compacted roots.
-        LinkedHashMap<String, NodeState> compactedRoots = compact(
-            getRoot(base),
-            uncompactedRoots,
-            getRoot(onto),
-            canceller
-        );
-
-        if (compactedRoots == null) {
-            return null;
-        }
-
-        // Build a compacted super root by replacing the uncompacted roots with
-        // the compacted ones in the original node.
-        NodeBuilder builder = uncompacted.builder();
-        for (Entry<String, NodeState> compactedRoot : compactedRoots.entrySet()) {
-            String path = compactedRoot.getKey();
-            NodeState state = compactedRoot.getValue();
-            NodeBuilder childBuilder = getChild(builder, getParentPath(path));
-            childBuilder.setChildNode(getName(path), state);
-        }
-
-        return nodeWriter.writeNode(builder.getNodeState(), requireNonNull(getStableIdBytes(uncompacted)));
-    }
-
-    @NotNull
-    private static NodeState getRoot(@NotNull NodeState node) {
-        return node.hasChildNode("root")
-            ? node.getChildNode("root")
-            : EMPTY_NODE;
-    }
-
-    /**
-     * Compact a list of uncompacted roots on top of base roots of the same key or
-     * an empty node if none.
-     */
-    @Nullable
-    private LinkedHashMap<String, NodeState> compact(
-        @NotNull NodeState base,
-        @NotNull LinkedHashMap<String, NodeState> uncompactedRoots,
-        @NotNull NodeState onto,
-        Canceller canceller
-    ) throws IOException {
-        LinkedHashMap<String, NodeState> compactedRoots = newLinkedHashMap();
-        for (Entry<String, NodeState> uncompactedRoot : uncompactedRoots.entrySet()) {
-            String path = uncompactedRoot.getKey();
-            NodeState uncompacted = uncompactedRoot.getValue();
-            Result result = compactWithCache(base, uncompacted, onto, path, canceller);
-            if (result == null) {
+        // could already be in cache if compactor is reused
+        CompactedNodeState compacted = cpCache.get(entry.getValue());
+        gcListener.info("compacting {}.", path);
+        if (compacted == null) {
+            compacted = compactDownWithDelegate(getRoot(before), entry.getValue(), hardCanceller, softCanceller);
+            if (compacted == null) {
                 return null;
             }
-            base = result.nextBefore;
-            onto = result.nextOnto;
-            compactedRoots.put(path, result.compacted);
         }
-        return compactedRoots;
+
+        NodeBuilder builder = after.builder();
+        Buffer stableIdBytes = requireNonNull(CompactorUtils.getStableIdBytes(after));
+
+        getChild(builder, getParentPath(path)).setChildNode(getName(path), compacted);
+
+        if (compacted.isComplete()) {
+            cpCache.put(entry.getValue(), compacted);
+        } else {
+            return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, false);
+        }
+
+        before = entry.getValue();
+
+        while (iterator.hasNext()) {
+            entry = iterator.next();
+            path = entry.getKey();
+            gcListener.info("compacting {}.", path);
+
+            compacted = compactWithCache(before, entry.getValue(), compacted, hardCanceller);
+            if (compacted == null) {
+                return null;
+            }
+
+            before = entry.getValue();
+            checkState(compacted.isComplete());
+            getChild(builder, getParentPath(path)).setChildNode(getName(path), compacted);
+
+            if (softCanceller.check().isCancelled()) {
+                return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, false);
+            }
+        }
+
+        return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, true);
+    }
+
+    @Override
+    public @Nullable CompactedNodeState compact(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull NodeState onto,
+            @NotNull Canceller canceller
+    ) throws IOException {
+        LinkedHashMap<String, NodeState> roots = collectRoots(before, after);
+
+        NodeBuilder builder = after.builder();
+        Buffer stableIdBytes = requireNonNull(CompactorUtils.getStableIdBytes(after));
+
+        before = getRoot(before);
+        onto = getRoot(onto);
+
+        for (Entry<String, NodeState> entry : roots.entrySet()) {
+            String path = entry.getKey();
+            after = entry.getValue();
+            CompactedNodeState compacted = compactWithCache(before, after, onto, canceller);
+            if (compacted == null) {
+                return null;
+            }
+            checkState(compacted.isComplete());
+            getChild(builder, getParentPath(path)).setChildNode(getName(path), compacted);
+            before = after;
+            onto = compacted;
+        }
+
+        return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, true);
+    }
+
+    private @Nullable CompactedNodeState compactWithCache(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull NodeState onto,
+            @NotNull Canceller canceller
+    ) throws IOException {
+        CompactedNodeState compacted = cpCache.get(after);
+        if (compacted == null) {
+            compacted = compactWithDelegate(before, after, onto, canceller);
+            if (compacted != null) {
+                cpCache.put(after, compacted);
+            }
+        } else {
+            gcListener.info("found checkpoint in cache.");
+        }
+        return compacted;
     }
 
     /**
@@ -184,11 +196,10 @@
      * state from a {@code superRoot}. This list consists of all checkpoints followed by
      * the root.
      */
-    @NotNull
-    private LinkedHashMap<String, NodeState> collectRoots(@NotNull NodeState superRootBefore, @NotNull NodeState superRootAfter) {
-        LinkedHashMap<String, NodeState> roots = newLinkedHashMap();
-
-        List<ChildNodeEntry> checkpoints = newArrayList();
+    private @NotNull LinkedHashMap<String, NodeState> collectRoots(
+            @NotNull NodeState superRootBefore,
+            @NotNull NodeState superRootAfter) {
+        List<ChildNodeEntry> checkpoints = new ArrayList<>();
         superRootAfter.getChildNode("checkpoints").compareAgainstBaseState(
                 superRootBefore.getChildNode("checkpoints"), new DefaultNodeStateDiff() {
                     @Override
@@ -205,11 +216,12 @@
             return Long.compare(c1, c2);
         });
 
+        LinkedHashMap<String, NodeState> roots = new LinkedHashMap<>();
         for (ChildNodeEntry checkpoint : checkpoints) {
             String name = checkpoint.getName();
             NodeState node = checkpoint.getNodeState();
-            gcListener.info("found checkpoint {} created at {}.",
-                name, new Date(node.getLong("created")));
+            gcListener.info("found checkpoint {} created on {}.",
+                    name, new Date(node.getLong("created")));
             roots.put("checkpoints/" + name + "/root", node.getChildNode("root"));
         }
         roots.put("root", superRootAfter.getChildNode("root"));
@@ -217,65 +229,35 @@
         return roots;
     }
 
-    @NotNull
-    private static NodeBuilder getChild(NodeBuilder builder, String path) {
+    private static @NotNull NodeState getRoot(@NotNull NodeState node) {
+        return node.hasChildNode("root") ? node.getChildNode("root") : EMPTY_NODE;
+    }
+
+    private static @NotNull NodeBuilder getChild(NodeBuilder builder, String path) {
         for (String name : elements(path)) {
             builder = builder.getChildNode(name);
         }
         return builder;
     }
 
-    private static class Result {
-            final NodeState compacted;
-            final NodeState nextBefore;
-            final NodeState nextOnto;
-
-            Result(@NotNull NodeState compacted, @NotNull NodeState nextBefore, @NotNull NodeState nextOnto) {
-                this.compacted = compacted;
-                this.nextBefore = nextBefore;
-                this.nextOnto = nextOnto;
-            }
-        }
-
     /**
      * Delegate compaction to another, usually simpler, implementation.
      */
-    @Nullable
-    protected SegmentNodeState compactWithDelegate(
+    protected @Nullable CompactedNodeState compactDownWithDelegate(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
+    ) throws IOException {
+        return compactor.compactDown(before, after, hardCanceller, softCanceller);
+    }
+
+    protected @Nullable CompactedNodeState compactWithDelegate(
             @NotNull NodeState before,
             @NotNull NodeState after,
             @NotNull NodeState onto,
-            Canceller canceller
+            @NotNull Canceller canceller
     ) throws IOException {
         return compactor.compact(before, after, onto, canceller);
     }
-
-    /**
-     * Compact {@code after} against {@code before} on top of {@code onto} unless
-     * {@code after} has been compacted before and is found in the cache. In this
-     * case the cached version of the previously compacted {@code before} is returned.
-     */
-    @Nullable
-    private Result compactWithCache(
-        @NotNull NodeState before,
-        @NotNull NodeState after,
-        @NotNull NodeState onto,
-        @NotNull String path,
-        Canceller canceller
-    ) throws IOException {
-        gcListener.info("compacting {}.", path);
-        NodeState compacted = cpCache.get(after);
-        if (compacted == null) {
-            compacted = compactWithDelegate(before, after, onto, canceller);
-            if (compacted == null) {
-                return null;
-            } else {
-                cpCache.put(after, compacted);
-                return new Result(compacted, after, compacted);
-            }
-        } else {
-            gcListener.info("found {} in cache.", path);
-            return new Result(compacted, before, onto);
-        }
-    }
 }
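The following is a short sketch of the reworked CheckpointCompactor contract: compactDown takes separate hard and soft cancellers, and callers distinguish a fully compacted super-root from a partial one via isComplete(). Constructor and method signatures come from the diff; the trivial cancellers and the helper itself are illustrative, since real callers derive their cancellers from GC options and memory checks.

import java.io.IOException;

import org.apache.jackrabbit.oak.segment.CheckpointCompactor;
import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
import org.apache.jackrabbit.oak.spi.state.NodeState;

// Illustrative only: shows how a caller interprets the result of the new compactDown entry point.
final class CheckpointCompactionSketch {

    static CompactedNodeState compactSuperRoot(GCMonitor gcListener, CompactionWriter writer,
                                               NodeState superRootBefore, NodeState superRootAfter)
            throws IOException {
        CheckpointCompactor compactor =
                new CheckpointCompactor(gcListener, writer, GCNodeWriteMonitor.EMPTY);

        Canceller hard = Canceller.newCanceller();   // firing aborts compaction, result becomes null
        Canceller soft = Canceller.newCanceller();   // firing keeps a partial, resumable result

        CompactedNodeState result = compactor.compactDown(superRootBefore, superRootAfter, hard, soft);
        if (result == null) {
            // hard-cancelled: nothing usable was produced
        } else if (!result.isComplete()) {
            // soft-cancelled: a partially compacted super-root; compacted checkpoints stay in the cache
        }
        return result;
    }
}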
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java
index 61a2fb6..2d5a3ea 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ClassicCompactor.java
@@ -28,6 +28,7 @@
 import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProperty;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.jackrabbit.oak.api.Blob;
@@ -35,9 +36,12 @@
 import org.apache.jackrabbit.oak.api.Type;
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeBuilder;
+import org.apache.jackrabbit.oak.plugins.memory.ModifiedNodeState;
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
-import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
 import org.jetbrains.annotations.NotNull;
@@ -51,7 +55,7 @@
  * A node can either be compacted on its own or alternatively the difference between
  * two nodes can be compacted on top of an already compacted node.
  */
-public class ClassicCompactor implements Compactor {
+public class ClassicCompactor extends Compactor {
 
     /**
      * Number of content updates that need to happen before the updates
@@ -60,127 +64,130 @@
     static final int UPDATE_LIMIT =
             Integer.getInteger("compaction.update.limit", 10000);
 
-    @NotNull
-    private final SegmentWriter writer;
+    private final @NotNull CompactionWriter writer;
 
-    @NotNull
-    private final SegmentReader reader;
-
-    @Nullable
-    private final BlobStore blobStore;
-
-    @NotNull
-    private final GCNodeWriteMonitor compactionMonitor;
+    private final @NotNull GCNodeWriteMonitor compactionMonitor;
 
     /**
      * Create a new instance based on the passed arguments.
-     * @param reader     segment reader used to read from the segments
      * @param writer     segment writer used to serialise to segments
-     * @param blobStore  the blob store or {@code null} if none
      * @param compactionMonitor   notification call back for each compacted nodes,
      *                            properties, and binaries
      */
     public ClassicCompactor(
-            @NotNull SegmentReader reader,
-            @NotNull SegmentWriter writer,
-            @Nullable BlobStore blobStore,
+            @NotNull CompactionWriter writer,
             @NotNull GCNodeWriteMonitor compactionMonitor) {
         this.writer = checkNotNull(writer);
-        this.reader = checkNotNull(reader);
-        this.blobStore = blobStore;
         this.compactionMonitor = checkNotNull(compactionMonitor);
     }
 
-    /**
-     * Compact a given {@code state}
-     * @param state  the node state to compact
-     * @return       the compacted node state or {@code null} if cancelled.
-     * @throws IOException
-     */
-    @Nullable
-    public SegmentNodeState compact(@NotNull NodeState state, Canceller canceller) throws IOException {
-        return compact(EMPTY_NODE, state, EMPTY_NODE, canceller);
+    @Override
+    public @Nullable CompactedNodeState compactDown(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
+    ) throws IOException {
+        return compact(before, after, after, hardCanceller, softCanceller);
     }
 
-    /**
-     * compact the differences between {@code after} and {@code before} on top of {@code ont}.
-     * @param before   the node state to diff against from {@code after}
-     * @param after    the node state diffed against {@code before}
-     * @param onto     the node state compacted onto
-     * @return         the compacted node state or {@code null} if cancelled.
-     * @throws IOException
-     */
-    @Nullable
-    public SegmentNodeState compact(
+    @Override
+    public @Nullable CompactedNodeState compact(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull NodeState onto,
+            @NotNull Canceller canceller
+    ) throws IOException {
+        return compact(before, after, onto, canceller, Canceller.newCanceller());
+    }
+
+    private @Nullable CompactedNodeState compact(
         @NotNull NodeState before,
         @NotNull NodeState after,
         @NotNull NodeState onto,
-        Canceller canceller
+        @NotNull Canceller hardCanceller,
+        @NotNull Canceller softCanceller
     ) throws IOException {
-        checkNotNull(before);
-        checkNotNull(after);
-        checkNotNull(onto);
-        return new CompactDiff(onto, canceller).diff(before, after);
+        CompactedNodeState compactedState = getPreviouslyCompactedState(after);
+        if (compactedState == null) {
+            compactedState = new CompactDiff(onto, hardCanceller, softCanceller).diff(before, after);
+        }
+        return compactedState;
     }
 
-    protected SegmentNodeState writeNodeState(NodeState nodeState, Buffer stableIdBytes) throws IOException {
-        RecordId nodeId = writer.writeNode(nodeState, stableIdBytes);
-        compactionMonitor.onNode();
-        return new SegmentNodeState(reader, writer, blobStore, nodeId);
+    protected @Nullable CompactedNodeState writeNodeState(
+            @NotNull NodeState nodeState,
+            @Nullable Buffer stableIdBytes,
+            boolean complete
+    ) throws IOException {
+        if (complete) {
+            CompactedNodeState compacted = writer.writeFullyCompactedNode(nodeState, stableIdBytes);
+            compactionMonitor.onNode();
+            return compacted;
+        } else {
+            return writer.writePartiallyCompactedNode(nodeState, stableIdBytes);
+        }
+    }
+
+    protected @Nullable CompactedNodeState getPreviouslyCompactedState(NodeState nodeState) {
+        return writer.getPreviouslyCompactedState(nodeState);
     }
 
     private class CompactDiff implements NodeStateDiff {
-        @NotNull
-        private MemoryNodeBuilder builder;
-
-        @NotNull
-        private final NodeState base;
-
-        private final Canceller canceller;
-
-        @Nullable
-        private IOException exception;
-
+        private final @NotNull NodeState base;
+        private final @NotNull Canceller hardCanceller;
+        private final @NotNull Canceller softCanceller;
+        private final @NotNull List<PropertyState> modifiedProperties = new ArrayList<>();
+        private @NotNull NodeBuilder builder;
+        private @Nullable IOException exception;
         private long modCount;
 
         private void updated() throws IOException {
             if (++modCount % UPDATE_LIMIT == 0) {
-                RecordId newBaseId = writer.writeNode(builder.getNodeState(), null);
-                SegmentNodeState newBase = new SegmentNodeState(reader, writer, blobStore, newBaseId);
+                SegmentNodeState newBase = writeNodeState(builder.getNodeState(), null, false);
+                checkNotNull(newBase);
                 builder = new MemoryNodeBuilder(newBase);
             }
         }
 
-        CompactDiff(@NotNull NodeState base, Canceller canceller) {
-            this.builder = new MemoryNodeBuilder(checkNotNull(base));
-            this.canceller = canceller;
-            this.base = base;
+        CompactDiff(@NotNull NodeState base, @NotNull Canceller hardCanceller, @NotNull Canceller softCanceller) {
+            this.base = checkNotNull(base);
+            this.builder = new MemoryNodeBuilder(base);
+            this.hardCanceller = checkNotNull(hardCanceller);
+            this.softCanceller = checkNotNull(softCanceller);
         }
 
-        @Nullable
-        SegmentNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
-            boolean success = after.compareAgainstBaseState(before,
-                    new CancelableDiff(this, () -> canceller.check().isCancelled()));
+        private @NotNull CancelableDiff newCancelableDiff() {
+            return new CancelableDiff(this, () ->
+                    softCanceller.check().isCancelled() || hardCanceller.check().isCancelled());
+        }
+
+        @Nullable CompactedNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
+            boolean success = after.compareAgainstBaseState(before, newCancelableDiff());
             if (exception != null) {
                 throw new IOException(exception);
             } else if (success) {
+                // delay property compaction until the end in case compaction is cancelled
+                modifiedProperties.forEach(property -> builder.setProperty(compact(property)));
                 NodeState nodeState = builder.getNodeState();
-                checkState(modCount == 0 || !(nodeState instanceof SegmentNodeState));
-                return writeNodeState(nodeState, CompactorUtils.getStableIdBytes(after));
-            } else {
+                checkState(modCount == 0 || nodeState instanceof ModifiedNodeState);
+                return writeNodeState(nodeState, CompactorUtils.getStableIdBytes(after), true);
+            } else if (hardCanceller.check().isCancelled()) {
                 return null;
+            } else {
+                return writeNodeState(builder.getNodeState(), CompactorUtils.getStableIdBytes(after), false);
             }
         }
 
         @Override
         public boolean propertyAdded(@NotNull PropertyState after) {
-            builder.setProperty(compact(after));
+            modifiedProperties.add(after);
             return true;
         }
 
         @Override
         public boolean propertyChanged(@NotNull PropertyState before, @NotNull PropertyState after) {
-            builder.setProperty(compact(after));
+            modifiedProperties.add(after);
             return true;
         }
 
@@ -190,17 +197,17 @@
             return true;
         }
 
-        @Override
-        public boolean childNodeAdded(@NotNull String name, @NotNull NodeState after) {
+        private boolean childNodeUpdated(@NotNull String name, @NotNull NodeState before, @NotNull NodeState after) {
             try {
-                SegmentNodeState compacted = compact(after, canceller);
-                if (compacted != null) {
-                    updated();
-                    builder.setChildNode(name, compacted);
-                    return true;
-                } else {
+                NodeState child = base.getChildNode(name);
+                NodeState onto = child.exists() ? child : EMPTY_NODE;
+                CompactedNodeState compacted = compact(before, after, onto, hardCanceller, softCanceller);
+                if (compacted == null) {
                     return false;
                 }
+                updated();
+                builder.setChildNode(name, compacted);
+                return compacted.isComplete();
             } catch (IOException e) {
                 exception = e;
                 return false;
@@ -208,20 +215,13 @@
         }
 
         @Override
+        public boolean childNodeAdded(@NotNull String name, @NotNull NodeState after) {
+            return childNodeUpdated(name, EMPTY_NODE, after);
+        }
+
+        @Override
         public boolean childNodeChanged(@NotNull String name, @NotNull NodeState before, @NotNull NodeState after) {
-            try {
-                SegmentNodeState compacted = compact(before, after, base.getChildNode(name), canceller);
-                if (compacted != null) {
-                    updated();
-                    builder.setChildNode(name, compacted);
-                    return true;
-                } else {
-                    return false;
-                }
-            } catch (IOException e) {
-                exception = e;
-                return false;
-            }
+            return childNodeUpdated(name, before, after);
         }
 
         @Override
@@ -237,8 +237,7 @@
         }
     }
 
-    @NotNull
-    protected PropertyState compact(@NotNull PropertyState property) {
+    protected @NotNull PropertyState compact(@NotNull PropertyState property) {
         compactionMonitor.onProperty();
         String name = property.getName();
         Type<?> type = property.getType();
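Callers of the removed single-state convenience method ClassicCompactor.compact(state, canceller) now use the equivalent entry point on the Compactor base class shown in the next diff. A hedged migration sketch follows; the helper method and its name are illustrative. The UPDATE_LIMIT used to flush intermediate states is unchanged and is still read from the compaction.update.limit system property.

import java.io.IOException;

import org.apache.jackrabbit.oak.segment.ClassicCompactor;
import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
import org.apache.jackrabbit.oak.spi.state.NodeState;

final class SingleStateCompactionSketch {

    // before this patch (for illustration only):
    //   SegmentNodeState compacted = compactor.compact(state, canceller);
    static CompactedNodeState compactSingleState(ClassicCompactor compactor, NodeState state,
                                                 Canceller canceller) throws IOException {
        // compactUp(state, canceller) compacts 'state' against EMPTY_NODE, which is exactly
        // what the removed single-state compact(state, canceller) did internally.
        return compactor.compactUp(state, canceller);
    }
}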
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
index ebc52ff..e0dc103 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Compactor.java
@@ -19,13 +19,72 @@
 
 package org.apache.jackrabbit.oak.segment;
 
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
 
 import java.io.IOException;
 
-public interface Compactor {
-    SegmentNodeState compact(@NotNull NodeState before, @NotNull NodeState after, @NotNull NodeState onto,
-            Canceller canceller) throws IOException;
-}
\ No newline at end of file
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+
+public abstract class Compactor {
+    public final @Nullable CompactedNodeState compactDown(
+            @NotNull NodeState state,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
+    ) throws IOException {
+        return compactDown(EMPTY_NODE, state, hardCanceller, softCanceller);
+    }
+
+    /**
+     * compact the differences between {@code after} and {@code before} on top of {@code after}.
+     * @param before        the node state to diff against from {@code after}
+     * @param after         the node state diffed against {@code before}
+     * @param hardCanceller the trigger for hard cancellation; compaction is abandoned if cancelled
+     * @param softCanceller the trigger for soft cancellation; a partially compacted state is returned if cancelled
+     * @return              the compacted node state or {@code null} if hard-cancelled
+     * @throws IOException  if an error occurs during compaction
+     */
+    public abstract @Nullable CompactedNodeState compactDown(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
+    ) throws IOException;
+
+    public final @Nullable CompactedNodeState compactUp(
+            @NotNull NodeState state,
+            @NotNull Canceller canceller
+    ) throws IOException {
+        return compactUp(EMPTY_NODE, state, canceller);
+    }
+
+    /**
+     * compact the differences between {@code after} and {@code before} on top of {@code before}.
+     */
+    public final @Nullable CompactedNodeState compactUp(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller canceller
+    ) throws IOException {
+        return compact(before, after, before, canceller);
+    }
+
+    /**
+     * compact the differences between {@code after} and {@code before} on top of {@code onto}.
+     * @param before        the node state to diff against from {@code after}
+     * @param after         the node state diffed against {@code before}
+     * @param onto          the node state onto which the diff is applied
+     * @param canceller     the trigger for hard cancellation; compaction is abandoned if cancelled
+     * @return              the compacted node state or {@code null} if hard-cancelled
+     * @throws IOException  if an error occurs during compaction
+     */
+    public abstract @Nullable CompactedNodeState compact(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull NodeState onto,
+            @NotNull Canceller canceller
+    ) throws IOException;
+}
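To make the relationship between the three entry points of the new abstract Compactor explicit, here is a small illustrative sketch; the signatures are taken from the class above, while the helper and its resume strategy are only one possible way a caller could use a partial result.

import java.io.IOException;

import org.apache.jackrabbit.oak.segment.Compactor;
import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
import org.apache.jackrabbit.oak.spi.state.NodeState;

final class CompactorContractSketch {

    // compactDown rebases the (before -> after) diff onto 'after' itself and honours both
    // cancellers; compactUp rebases the same diff onto 'before' with hard cancellation only;
    // compact(before, after, onto, canceller) is the fully general form.
    static CompactedNodeState compactWithResume(Compactor compactor, NodeState before, NodeState after,
                                                Canceller hard, Canceller soft) throws IOException {
        CompactedNodeState down = compactor.compactDown(before, after, hard, soft);
        if (down == null) {
            return null;                         // hard-cancelled
        }
        if (down.isComplete()) {
            return down;                         // fully compacted in one pass
        }
        // illustrative resume strategy: feed the partial result back in as the base ('onto')
        // of a follow-up compaction of the same diff, this time with hard cancellation only
        return compactor.compact(before, after, down, hard);
    }
}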
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CompactorUtils.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CompactorUtils.java
index ef0bae8..5160743 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CompactorUtils.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/CompactorUtils.java
@@ -24,8 +24,7 @@
 import org.jetbrains.annotations.Nullable;
 
 final class CompactorUtils {
-    @Nullable
-    static Buffer getStableIdBytes(@NotNull NodeState state) {
+    static @Nullable Buffer getStableIdBytes(@NotNull NodeState state) {
         if (state instanceof SegmentNodeState) {
             return ((SegmentNodeState) state).getStableIdBytes();
         } else {
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ParallelCompactor.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ParallelCompactor.java
index 9a90be2..3830eb7 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ParallelCompactor.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/ParallelCompactor.java
@@ -19,11 +19,14 @@
 package org.apache.jackrabbit.oak.segment;
 
 import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeBuilder;
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
-import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
 import org.jetbrains.annotations.NotNull;
@@ -31,9 +34,11 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.List;
-import java.util.Map;
+import java.util.Map.Entry;
+import java.util.AbstractMap.SimpleImmutableEntry;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -43,26 +48,25 @@
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkNotNull;
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkState;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
-import static org.apache.jackrabbit.oak.segment.CompactorUtils.getStableIdBytes;
 
 /**
  * This compactor implementation leverages the tree structure of the repository for concurrent compaction.
- * It explores the tree breadth-first until the target node count is reached. Every node at this depth will be
- * an entry point for asynchronous compaction. After the exploration phase, the main thread will collect
- * these compaction results and write their parents' node state to disk.
+ * It explores the tree breadth-first until the target node count ({@value EXPLORATION_LOWER_LIMIT}) is reached.
+ * Every node at this depth will be an entry point for asynchronous compaction. After the exploration phase,
+ * the main thread will collect these compaction results and write their parents' node state to disk.
  */
 public class ParallelCompactor extends CheckpointCompactor {
     /**
      * Expand repository tree until there are this many nodes for each worker to compact. Tradeoff
-     * between low efficiency of many small tasks and high risk of at least one of the subtrees being
-     * significantly larger than totalSize / numWorkers (unequal work distribution).
+     * between inefficiency of many small tasks and high risk of at least one of the subtrees being
+     * significantly larger than totalSize / {@code numWorkers} (unequal work distribution).
      */
-    private static final int MIN_NODES_PER_WORKER = 1000;
+    private static final int EXPLORATION_LOWER_LIMIT = 10_000;
 
     /**
-     * Stop expansion if tree size grows beyond this many nodes per worker at the latest.
+     * Stop expansion if tree size grows beyond this many nodes.
      */
-    private static final int MAX_NODES_PER_WORKER = 10_000;
+    private static final int EXPLORATION_UPPER_LIMIT = 100_000;
 
     private final int numWorkers;
 
@@ -71,71 +75,47 @@
     /**
      * Manages workers for asynchronous compaction.
      */
-    @Nullable
-    private ExecutorService executorService;
+    private @Nullable ExecutorService executorService;
 
     /**
      * Create a new instance based on the passed arguments.
-     * @param gcListener listener receiving notifications about the garbage collection process
-     * @param reader     segment reader used to read from the segments
-     * @param writer     segment writer used to serialise to segments
-     * @param blobStore  the blob store or {@code null} if none
-     * @param compactionMonitor   notification call back for each compacted nodes, properties, and binaries
-     * @param nThreads   number of threads to use for parallel compaction,
-     *                   negative numbers are interpreted relative to the number of available processors
+     *
+     * @param gcListener        listener receiving notifications about the garbage collection process
+     * @param writer            segment writer used to serialise to segments
+     * @param compactionMonitor notification callback for each compacted node, property, and binary
+     * @param nThreads          number of threads to use for parallel compaction;
+     *                          negative numbers are interpreted relative to the number of available processors
      */
     public ParallelCompactor(
             @NotNull GCMonitor gcListener,
-            @NotNull SegmentReader reader,
-            @NotNull SegmentWriter writer,
-            @Nullable BlobStore blobStore,
+            @NotNull CompactionWriter writer,
             @NotNull GCNodeWriteMonitor compactionMonitor,
             int nThreads) {
-        super(gcListener, reader, writer, blobStore, compactionMonitor);
-
-        int availableProcessors = Runtime.getRuntime().availableProcessors();
+        super(gcListener, writer, compactionMonitor);
         if (nThreads < 0) {
-            nThreads += availableProcessors + 1;
+            nThreads += Runtime.getRuntime().availableProcessors() + 1;
         }
         numWorkers = Math.max(0, nThreads - 1);
         totalSizeEstimate = compactionMonitor.getEstimatedTotal();
     }
 
     /**
-     * Calculates the minimum number of entry points for asynchronous compaction.
-     */
-    private int getMinNodeCount() {
-        return numWorkers * MIN_NODES_PER_WORKER;
-    }
-
-    private int getMaxNodeCount() {
-        return numWorkers * MAX_NODES_PER_WORKER;
-    }
-
-    /**
-     * Represents structure of repository changes. Tree is built by exploration process and subsequently
-     * used to collect and merge asynchronous compaction results.
+     * Implementation of {@link NodeStateDiff} to represent structure of repository changes.
+     * Tree is built by exploration process and subsequently used to collect and merge
+     * asynchronous compaction results.
      */
     private class CompactionTree implements NodeStateDiff {
-        @NotNull
-        private final NodeState before;
-        @NotNull
-        private final NodeState after;
-        @NotNull
-        private final NodeState onto;
-        @NotNull
-        private final HashMap<String, CompactionTree> modifiedChildren = new HashMap<>();
-        @NotNull
-        private final List<Property> modifiedProperties = new ArrayList<>();
-        @NotNull
-        private final List<String> removedChildNames = new ArrayList<>();
-        @NotNull
-        private final List<String> removedPropertyNames = new ArrayList<>();
+        private final @NotNull NodeState before;
+        private final @NotNull NodeState after;
+        private final @NotNull NodeState onto;
+        private final @NotNull List<Entry<String, CompactionTree>> modifiedChildren = new ArrayList<>();
+        private final @NotNull List<Property> modifiedProperties = new ArrayList<>();
+        private final @NotNull List<String> removedChildNames = new ArrayList<>();
+        private final @NotNull List<String> removedPropertyNames = new ArrayList<>();
         /**
          * Stores result of asynchronous compaction.
          */
-        @Nullable
-        private Future<SegmentNodeState> compactionFuture;
+        private @Nullable Future<CompactedNodeState> compactionFuture;
 
         CompactionTree(@NotNull NodeState before, @NotNull NodeState after, @NotNull NodeState onto) {
             this.before = checkNotNull(before);
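The worker-count convention kept by the rewritten constructor above is easy to miss, so here is a hedged sketch of the arithmetic; the helper class is illustrative and the example value is arbitrary.

// Negative nThreads values are interpreted relative to the number of available processors,
// and one thread is reserved for the coordinating main thread.
final class WorkerCountSketch {
    static int numWorkers(int nThreads) {
        if (nThreads < 0) {
            // e.g. nThreads == -1 means "one thread per available processor"
            nThreads += Runtime.getRuntime().availableProcessors() + 1;
        }
        return Math.max(0, nThreads - 1);   // 0 workers: ParallelCompactor falls back to sequential compaction
    }
}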
@@ -144,24 +124,35 @@
         }
 
         private class Property {
-            @NotNull
-            private final PropertyState state;
+            private final @NotNull PropertyState state;
 
             Property(@NotNull PropertyState state) {
                 this.state = state;
             }
 
-            @NotNull
-            PropertyState compact() {
+            @NotNull PropertyState compact() {
                 return compactor.compact(state);
             }
         }
 
-        boolean compareStates(Canceller canceller) {
+        private boolean compareState(@NotNull Canceller canceller) {
             return after.compareAgainstBaseState(before,
                     new CancelableDiff(this, () -> canceller.check().isCancelled()));
         }
 
+        @Nullable List<Entry<String, CompactionTree>> expand(@NotNull Canceller hardCanceller) {
+            checkState(compactionFuture == null);
+            CompactedNodeState compactedState = compactor.getPreviouslyCompactedState(after);
+            if (compactedState != null) {
+                compactionFuture = CompletableFuture.completedFuture(compactedState);
+                return Collections.emptyList();
+            } else if (compareState(hardCanceller)) {
+                return modifiedChildren;
+            } else {
+                return null;
+            }
+        }
+
         long getEstimatedSize() {
             return ApproximateCounter.getCountSync(after);
         }
@@ -186,15 +177,17 @@
 
         @Override
         public boolean childNodeAdded(String name, NodeState after) {
-            CompactionTree child = new CompactionTree(EMPTY_NODE, after, EMPTY_NODE);
-            modifiedChildren.put(name, child);
+            NodeState childOnto = onto.getChildNode(name);
+            CompactionTree child = new CompactionTree(EMPTY_NODE, after,
+                    childOnto.exists() ? childOnto : EMPTY_NODE);
+            modifiedChildren.add(new SimpleImmutableEntry<>(name, child));
             return true;
         }
 
         @Override
         public boolean childNodeChanged(String name, NodeState before, NodeState after) {
             CompactionTree child = new CompactionTree(before, after, onto.getChildNode(name));
-            modifiedChildren.put(name, child);
+            modifiedChildren.add(new SimpleImmutableEntry<>(name, child));
             return true;
         }
 
@@ -207,20 +200,38 @@
         /**
          * Start asynchronous compaction.
          */
-        boolean compactAsync(Canceller canceller) {
-            if (compactionFuture != null) {
+        void compactAsync(@NotNull Canceller hardCanceller, @Nullable Canceller softCanceller) {
+            if (compactionFuture == null) {
+                checkNotNull(executorService);
+                if (softCanceller == null) {
+                    compactionFuture = executorService.submit(() ->
+                            compactor.compact(before, after, onto, hardCanceller));
+                } else {
+                    checkState(onto.equals(after));
+                    compactionFuture = executorService.submit(() ->
+                            compactor.compactDown(before, after, hardCanceller, softCanceller));
+                }
+            }
+        }
+
+        /**
+         * Attempts to cancel a pending asynchronous compaction. Tasks that are already running are not affected.
+         * Cancelling tasks that have not started yet avoids the considerable overhead of waiting for the
+         * compactor to return after the Canceller has fired for every scheduled task.
+         */
+        private boolean tryCancelCompaction() {
+            if (compactionFuture != null && compactionFuture.cancel(false)) {
+                compactionFuture = null;
+                return true;
+            } else {
                 return false;
             }
-            checkNotNull(executorService);
-            compactionFuture = executorService.submit(() -> compactor.compact(before, after, onto, canceller));
-            return true;
         }
 
         /**
          * Start synchronous compaction on tree or collect result of asynchronous compaction if it has been started.
          */
-        @Nullable
-        SegmentNodeState compact() throws IOException {
+        @Nullable CompactedNodeState compact() throws IOException {
             if (compactionFuture != null) {
                 try {
                     return compactionFuture.get();
@@ -231,76 +242,84 @@
                 }
             }
 
-            MemoryNodeBuilder builder = new MemoryNodeBuilder(onto);
+            NodeBuilder builder = new MemoryNodeBuilder(onto);
+            Buffer stableIdBytes = CompactorUtils.getStableIdBytes(after);
 
-            for (Map.Entry<String, CompactionTree> entry : modifiedChildren.entrySet()) {
-                SegmentNodeState compactedState = entry.getValue().compact();
+            for (int i = 0; i < modifiedChildren.size(); i++) {
+                Entry<String, CompactionTree> entry = modifiedChildren.get(i);
+                CompactionTree child = entry.getValue();
+                CompactedNodeState compactedState = child.compact();
                 if (compactedState == null) {
                     return null;
                 }
                 builder.setChildNode(entry.getKey(), compactedState);
+
+                // collect results and cancel unfinished tasks in reverse order
+                // increases cancellation success rate since tasks are executed in order
+                if (!compactedState.isComplete()) {
+                    for (int j = modifiedChildren.size()-1; j > i; j--) {
+                        entry = modifiedChildren.get(j);
+                        if (!entry.getValue().tryCancelCompaction()) {
+                            compactedState = entry.getValue().compact();
+                            if (compactedState == null) {
+                                return null;
+                            }
+                            builder.setChildNode(entry.getKey(), compactedState);
+                        }
+                    }
+                    return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, false);
+                }
             }
-            for (String childName : removedChildNames) {
-                builder.getChildNode(childName).remove();
+
+            for (String name : removedChildNames) {
+                builder.getChildNode(name).remove();
             }
+
             for (Property property : modifiedProperties) {
                 builder.setProperty(property.compact());
             }
-            for (String propertyName : removedPropertyNames) {
-                builder.removeProperty(propertyName);
+
+            for (String name : removedPropertyNames) {
+                builder.removeProperty(name);
             }
-            return compactor.writeNodeState(builder.getNodeState(), getStableIdBytes(after));
+
+            return compactor.writeNodeState(builder.getNodeState(), stableIdBytes, true);
         }
     }
 
     /**
-     * Implementation of {@link NodeStateDiff} to build {@link CompactionTree} and start asynchronous compaction on
+     * Handler class to build {@link CompactionTree} and start asynchronous compaction at
      * suitable entry points. Performs what is referred to as the exploration phase in other comments.
      */
     private class CompactionHandler {
-        @NotNull
-        private final NodeState base;
+        private final @NotNull NodeState base;
+        private final @NotNull Canceller hardCanceller;
+        private final @Nullable Canceller softCanceller;
 
-        @NotNull
-        private final Canceller canceller;
-
-        CompactionHandler(@NotNull NodeState base, @NotNull Canceller canceller) {
+        CompactionHandler(@NotNull NodeState base, @NotNull Canceller hardCanceller) {
             this.base = base;
-            this.canceller = canceller;
+            this.hardCanceller = hardCanceller;
+            this.softCanceller = null;
         }
 
-        @Nullable
-        SegmentNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
+        CompactionHandler(@NotNull NodeState base, @NotNull Canceller hardCanceller, @NotNull Canceller softCanceller) {
+            this.base = base;
+            this.hardCanceller = hardCanceller;
+            this.softCanceller = softCanceller;
+        }
+
+        @Nullable CompactedNodeState diff(@NotNull NodeState before, @NotNull NodeState after) throws IOException {
             checkNotNull(executorService);
             checkState(!executorService.isShutdown());
 
             gcListener.info("compacting with {} threads.", numWorkers + 1);
             gcListener.info("exploring content tree to find subtrees for parallel compaction.");
-            gcListener.info("target node count for expansion is {}, based on {} available workers.",
-                    getMinNodeCount(), numWorkers);
+            gcListener.info("target node count for expansion is {}.", EXPLORATION_LOWER_LIMIT);
 
-            CompactionTree compactionTree = new CompactionTree(before, after, base);
-            if (!compactionTree.compareStates(canceller)) {
-                return null;
-            }
+            CompactionTree root = new CompactionTree(before, after, base);
 
-            List<CompactionTree> topLevel = new ArrayList<>();
-            for (Map.Entry<String, CompactionTree> childEntry : compactionTree.modifiedChildren.entrySet()) {
-                switch (childEntry.getKey()) {
-                    // these tend to be the largest directories, others will not be split up
-                    case "content":
-                    case "oak:index":
-                    case "jcr:system":
-                        topLevel.add(childEntry.getValue());
-                        break;
-                    default:
-                        checkState(childEntry.getValue().compactAsync(canceller));
-                        break;
-                }
-            }
-
-            if (diff(1, topLevel)) {
-                SegmentNodeState compacted = compactionTree.compact();
+            if (diff(0, Collections.singletonList(root))) {
+                CompactedNodeState compacted = root.compact();
                 if (compacted != null) {
                     return compacted;
                 }
@@ -320,14 +339,12 @@
         }
 
         private boolean diff(int depth, List<CompactionTree> nodes) {
-            int targetCount = getMinNodeCount();
-            gcListener.info("Found {} nodes at depth {}, target is {}.", nodes.size(), depth, targetCount);
+            gcListener.info("found {} nodes at depth {}.", nodes.size(), depth);
 
-            if (nodes.size() >= targetCount) {
-                nodes.forEach(node -> node.compactAsync(canceller));
+            if (nodes.size() >= EXPLORATION_LOWER_LIMIT) {
+                nodes.forEach(node -> node.compactAsync(hardCanceller, softCanceller));
                 return true;
             } else if (nodes.isEmpty()) {
-                gcListener.info("Amount of changes too small, tree will not be split.");
                 return true;
             }
 
@@ -335,35 +352,63 @@
             for (CompactionTree node : nodes) {
                 long estimatedSize = node.getEstimatedSize();
                 if (estimatedSize != -1 && estimatedSize <= (totalSizeEstimate / numWorkers)) {
-                    checkState(node.compactAsync(canceller));
-                } else if (nextDepth.size() < getMaxNodeCount()) {
-                    if (!node.compareStates(canceller)) {
+                    node.compactAsync(hardCanceller, softCanceller);
+                } else if (nextDepth.size() < EXPLORATION_UPPER_LIMIT) {
+                    List<Entry<String, CompactionTree>> children = node.expand(hardCanceller);
+                    if (children == null) {
                         return false;
                     }
-                    nextDepth.addAll(node.modifiedChildren.values());
+                    children.forEach(entry -> nextDepth.add(entry.getValue()));
                 } else {
                     nextDepth.add(node);
                 }
             }
 
+            if (nextDepth.size() < nodes.size()) {
+                nodes.forEach(node -> node.compactAsync(hardCanceller, softCanceller));
+                return true;
+            }
+
             return diff(depth + 1, nextDepth);
         }
     }
 
-    @Nullable
+    private boolean initializeExecutor() {
+        if (numWorkers <= 0) {
+            gcListener.info("using sequential compaction.");
+            return false;
+        }
+        if (executorService == null || executorService.isShutdown()) {
+            executorService = Executors.newFixedThreadPool(numWorkers);
+        }
+        return true;
+    }
+
     @Override
-    protected SegmentNodeState compactWithDelegate(
+    protected @Nullable CompactedNodeState compactDownWithDelegate(
+            @NotNull NodeState before,
+            @NotNull NodeState after,
+            @NotNull Canceller hardCanceller,
+            @NotNull Canceller softCanceller
+    ) throws IOException {
+        if (initializeExecutor()) {
+            return new CompactionHandler(after, hardCanceller, softCanceller).diff(before, after);
+        } else {
+            return super.compactDownWithDelegate(before, after, hardCanceller, softCanceller);
+        }
+    }
+
+    @Override
+    protected @Nullable CompactedNodeState compactWithDelegate(
             @NotNull NodeState before,
             @NotNull NodeState after,
             @NotNull NodeState onto,
-            Canceller canceller
+            @NotNull Canceller canceller
     ) throws IOException {
-        if (numWorkers <= 0) {
-            gcListener.info("using sequential compaction.");
+        if (initializeExecutor()) {
+            return new CompactionHandler(onto, canceller).diff(before, after);
+        } else {
             return super.compactWithDelegate(before, after, onto, canceller);
-        } else if (executorService == null || executorService.isShutdown()) {
-            executorService = Executors.newFixedThreadPool(numWorkers);
         }
-        return new CompactionHandler(onto, canceller).diff(before, after);
     }
 }
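
The exploration phase implemented by CompactionHandler above works breadth-first: subtrees that look small (relative to the estimated total size divided by the number of workers) are scheduled for asynchronous compaction immediately, larger ones are expanded one level further, and the descent stops once at least EXPLORATION_LOWER_LIMIT candidate subtrees exist or expanding no longer increases the candidate count. The following stand-alone Java sketch models only that heuristic; the Node interface, the limit values and the scheduling method are hypothetical stand-ins for Oak's CompactionTree, not the actual implementation.

import java.util.ArrayList;
import java.util.List;

// Minimal model of the breadth-first exploration heuristic; limits and the Node type are assumptions.
public class ExplorationSketch {
    static final int EXPLORATION_LOWER_LIMIT = 16;  // placeholder value, not Oak's constant
    static final int EXPLORATION_UPPER_LIMIT = 64;  // placeholder value, not Oak's constant

    interface Node {
        long estimatedSize();           // -1 if unknown
        List<Node> changedChildren();   // children touched by the diff
        void compactAsync();            // schedule this subtree on the worker pool
    }

    // Expand level by level until enough independent subtrees exist for parallel compaction.
    static void explore(List<Node> nodes, long totalSizeEstimate, int numWorkers) {
        if (nodes.isEmpty() || nodes.size() >= EXPLORATION_LOWER_LIMIT) {
            nodes.forEach(Node::compactAsync);
            return;
        }
        List<Node> nextDepth = new ArrayList<>();
        for (Node node : nodes) {
            long size = node.estimatedSize();
            if (size != -1 && size <= totalSizeEstimate / numWorkers) {
                node.compactAsync();                       // small enough: compact as one unit
            } else if (nextDepth.size() < EXPLORATION_UPPER_LIMIT) {
                nextDepth.addAll(node.changedChildren());  // large: expand one level further
            } else {
                nextDepth.add(node);                       // upper limit reached, keep as-is
            }
        }
        if (nextDepth.size() < nodes.size()) {
            nodes.forEach(Node::compactAsync);             // expansion did not help, stop here
            return;
        }
        explore(nextDepth, totalSizeEstimate, numWorkers);
    }
}
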
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBlob.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBlob.java
index 3bc438b..9ab2ffe 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBlob.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBlob.java
@@ -30,6 +30,7 @@
 import java.util.Set;
 
 import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.commons.properties.SystemPropertySupplier;
 import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.InMemoryDataRecord;
 import org.apache.jackrabbit.oak.plugins.memory.AbstractBlob;
@@ -41,6 +42,10 @@
  * A BLOB (stream of bytes). This is a record of type "VALUE".
  */
 public class SegmentBlob extends Record implements Blob {
+    private static final boolean FAST_EQUALS_SAME_BLOBSTORE = SystemPropertySupplier
+            .create("oak.segment.blob.fastEquals.same.blobstore", false)
+            .formatSetMessage((name, value) -> String.format("%s set to: %s", name, value))
+            .get();
 
     @Nullable
     private final BlobStore blobStore;
@@ -192,6 +197,14 @@
                 }
             }
 
+            if (FAST_EQUALS_SAME_BLOBSTORE) {
+                if (blobStore != null && this.blobStore.equals(that.blobStore) && this.isExternal() && that.isExternal()) {
+                    if (this.getBlobId() != null && that.getBlobId() != null) {
+                        return this.getBlobId().equals(that.getBlobId());
+                    }
+                }
+            }
+
             if (this.length() != that.length()) {
                 return false;
             }
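
The new shortcut in SegmentBlob.equals is gated by the oak.segment.blob.fastEquals.same.blobstore system property, read once when the class is loaded. A rough stand-alone illustration of the same gating pattern follows; it uses only plain JDK calls, and its fields and fallback comparison are hypothetical simplifications rather than Oak's SegmentBlob code.

// Illustration of a system-property-gated equality shortcut; not Oak's SegmentBlob.
final class BlobFastEqualsSketch {

    // Plain-JDK stand-in for Oak's SystemPropertySupplier: read once at class-load time,
    // defaulting to false when -Doak.segment.blob.fastEquals.same.blobstore is not set.
    private static final boolean FAST_EQUALS =
            Boolean.getBoolean("oak.segment.blob.fastEquals.same.blobstore");

    static boolean equalBlobs(String storeA, String blobIdA, byte[] contentA,
                              String storeB, String blobIdB, byte[] contentB) {
        // Shortcut: two external blobs held by the same blob store are equal iff their blob ids match.
        if (FAST_EQUALS && storeA != null && storeA.equals(storeB)
                && blobIdA != null && blobIdB != null) {
            return blobIdA.equals(blobIdB);
        }
        // Fallback: compare the (potentially large) content, as the pre-existing code path does.
        return java.util.Arrays.equals(contentA, contentB);
    }
}

Under this reading, the shortcut would be enabled with -Doak.segment.blob.fastEquals.same.blobstore=true on the JVM command line, and leaving the property unset keeps the previous comparison behaviour.
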
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/SegmentWriterFactory.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriterFactory.java
similarity index 86%
rename from oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/SegmentWriterFactory.java
rename to oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriterFactory.java
index 572f7a8..1eef291 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/SegmentWriterFactory.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriterFactory.java
@@ -17,13 +17,10 @@
  * under the License.
  */
 
-package org.apache.jackrabbit.oak.segment.file;
+package org.apache.jackrabbit.oak.segment;
 
-import org.apache.jackrabbit.oak.segment.SegmentWriter;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 
-interface SegmentWriterFactory {
-
+public interface SegmentWriterFactory {
     SegmentWriter newSegmentWriter(GCGeneration generation);
-
 }
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
index 4851994..f368f8e 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractCompactionStrategy.java
@@ -27,8 +27,6 @@
 import static org.apache.jackrabbit.oak.segment.file.TarRevisions.EXPEDITE_OPTION;
 import static org.apache.jackrabbit.oak.segment.file.TarRevisions.timeout;
 
-import org.apache.jackrabbit.guava.common.base.Function;
-
 import org.apache.jackrabbit.oak.segment.CheckpointCompactor;
 import org.apache.jackrabbit.oak.segment.ClassicCompactor;
 import org.apache.jackrabbit.oak.segment.Compactor;
@@ -44,12 +42,15 @@
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 
 import java.io.IOException;
+import java.util.concurrent.atomic.AtomicReference;
 
 abstract class AbstractCompactionStrategy implements CompactionStrategy {
 
     abstract GCType getCompactionType();
 
-    abstract GCGeneration nextGeneration(GCGeneration current);
+    abstract GCGeneration partialGeneration(GCGeneration current);
+
+    abstract GCGeneration targetGeneration(GCGeneration current);
 
     private CompactionResult compactionSucceeded(
         Context context,
@@ -60,6 +61,20 @@
         return CompactionResult.succeeded(getCompactionType(), generation, context.getGCOptions(), compactedRootId, context.getGCCount());
     }
 
+    private CompactionResult compactionPartiallySucceeded(
+            Context context,
+            GCGeneration generation,
+            RecordId compactedRootId
+    ) {
+        context.getGCListener().compactionSucceeded(generation);
+        return CompactionResult.partiallySucceeded(generation, compactedRootId, context.getGCCount());
+    }
+
+    private static CompactionResult compactionAborted(Context context, GCGeneration generation) {
+        context.getGCListener().compactionFailed(generation);
+        return CompactionResult.aborted(getGcGeneration(context), context.getGCCount());
+    }
+
     private static GCGeneration getGcGeneration(Context context) {
         return context.getRevisions().getHead().getSegmentId().getGcGeneration();
     }
@@ -72,23 +87,21 @@
         return context.getTarFiles().size();
     }
 
-    private static CompactionResult compactionAborted(Context context, GCGeneration generation) {
-        context.getGCListener().compactionFailed(generation);
-        return CompactionResult.aborted(getGcGeneration(context), generation, context.getGCCount());
-    }
-
-    private static SegmentNodeState forceCompact(
+    private static CompactedNodeState forceCompact(
         Context context,
         NodeState base,
         NodeState onto,
         Compactor compactor,
         Canceller canceller
     ) throws InterruptedException {
-        RecordId compactedId = setHead(context, headId -> {
+        AtomicReference<CompactedNodeState> compacted = new AtomicReference<>();
+        context.getRevisions().setHead(headId -> {
             try {
                 PrintableStopwatch t = PrintableStopwatch.createStarted();
-                SegmentNodeState after = compactor.compact(base, context.getSegmentReader().readNode(headId), onto, canceller);
+                NodeState currentHead = context.getSegmentReader().readNode(headId);
+                CompactedNodeState after = compactor.compact(base, currentHead, onto, canceller);
                 if (after != null) {
+                    compacted.set(after);
                     return after.getRecordId();
                 }
                 context.getGCListener().info("compaction cancelled after {}", t);
@@ -97,15 +110,8 @@
                 context.getGCListener().error("error during forced compaction.", e);
                 return null;
             }
-        });
-        if (compactedId == null) {
-            return null;
-        }
-        return context.getSegmentReader().readNode(compactedId);
-    }
-
-    private static RecordId setHead(Context context, Function<RecordId, RecordId> f) throws InterruptedException {
-        return context.getRevisions().setHead(f, timeout(context.getGCOptions().getForceTimeout(), SECONDS));
+        }, timeout(context.getGCOptions().getForceTimeout(), SECONDS));
+        return compacted.get();
     }
 
     private static String formatCompactionType(GCType compactionType) {
@@ -119,68 +125,109 @@
         }
     }
 
+    private boolean setHead(Context context, SegmentNodeState previous, SegmentNodeState head) {
+        return context.getRevisions().setHead(previous.getRecordId(), head.getRecordId(), EXPEDITE_OPTION);
+    }
+
     final CompactionResult compact(Context context, NodeState base) {
         context.getGCListener().info("running {} compaction", formatCompactionType(getCompactionType()));
 
-        GCGeneration nextGeneration = nextGeneration(getGcGeneration(context));
+        GCGeneration baseGeneration = getGcGeneration(context);
+        GCGeneration partialGeneration = partialGeneration(baseGeneration);
+        GCGeneration targetGeneration = targetGeneration(baseGeneration);
+        GCIncrement gcIncrement = new GCIncrement(baseGeneration, partialGeneration, targetGeneration);
 
         try {
             PrintableStopwatch watch = PrintableStopwatch.createStarted();
             context.getGCListener().info(
-                "compaction started, gc options={}, current generation={}, new generation={}",
+                "compaction started, gc options={},\n{}",
                 context.getGCOptions(),
-                getHead(context).getRecordId().getSegment().getGcGeneration(),
-                nextGeneration
+                gcIncrement
             );
             context.getGCListener().updateStatus(COMPACTION.message());
 
             GCJournal.GCJournalEntry gcEntry = context.getGCJournal().read();
             long initialSize = size(context);
 
-            SegmentWriter writer = context.getSegmentWriterFactory().newSegmentWriter(nextGeneration);
+            CompactionWriter writer = new CompactionWriter(
+                    context.getSegmentReader(),
+                    context.getBlobStore(),
+                    gcIncrement,
+                    context.getSegmentWriterFactory());
 
             context.getCompactionMonitor().init(gcEntry.getRepoSize(), gcEntry.getNodes(), initialSize);
 
-            Canceller compactionCanceller = context.getCanceller().withShortCircuit();
+            Canceller hardCanceller = context.getHardCanceller().withShortCircuit();
+            Canceller softCanceller = context.getSoftCanceller().withShortCircuit();
 
             Compactor compactor = newCompactor(context, writer);
+            CompactedNodeState compacted = null;
 
-            SegmentNodeState head = getHead(context);
-            SegmentNodeState compacted = compactor.compact(base, head, base, compactionCanceller);
-            if (compacted == null) {
-                context.getGCListener().warn("compaction cancelled: {}.", compactionCanceller.check().getReason().orElse("unknown reason"));
-                return compactionAborted(context, nextGeneration);
-            }
+            int cycles;
+            boolean success;
+            final int retryCount = Math.max(0, context.getGCOptions().getRetryCount());
 
-            context.getGCListener().info("compaction cycle 0 completed in {}. Compacted {} to {}",
-                watch, head.getRecordId(), compacted.getRecordId());
+            SegmentNodeState head;
+            Flusher flusher = () -> {
+                writer.flush();
+                context.getFlusher().flush();
+            };
 
-            int cycles = 0;
-            boolean success = false;
-            SegmentNodeState previousHead = head;
-            while (cycles < context.getGCOptions().getRetryCount() &&
-                !(success = context.getRevisions().setHead(previousHead.getRecordId(), compacted.getRecordId(), EXPEDITE_OPTION))) {
-                // Some other concurrent changes have been made.
-                // Rebase (and compact) those changes on top of the
-                // compacted state before retrying to set the head.
-                cycles++;
-                context.getGCListener().info("compaction detected concurrent commits while compacting. " +
-                        "Compacting these commits. Cycle {} of {}",
-                    cycles, context.getGCOptions().getRetryCount());
-                context.getGCListener().updateStatus(COMPACTION_RETRY.message() + cycles);
-                PrintableStopwatch cycleWatch = PrintableStopwatch.createStarted();
-
+            do {
                 head = getHead(context);
-                compacted = compactor.compact(previousHead, head, compacted, compactionCanceller);
-                if (compacted == null) {
-                    context.getGCListener().warn("compaction cancelled: {}.", compactionCanceller.check().getReason().orElse("unknown reason"));
-                    return compactionAborted(context, nextGeneration);
+                SegmentNodeState after = (compacted == null) ? head : compacted;
+                Canceller stateSaveTrigger = context.getStateSaveTriggerSupplier().get().withShortCircuit();
+
+                if (stateSaveTrigger.isCancelable()) {
+                    context.getGCListener().info("intermediate state save enabled.");
+                    Canceller saveStateCanceller = softCanceller.withCondition(
+                            "save intermediate compaction state", () -> stateSaveTrigger.check().isCancelled());
+                    compacted = compactor.compactDown(base, after, hardCanceller, saveStateCanceller);
+                } else if (softCanceller.isCancelable()) {
+                    context.getGCListener().info("soft cancellation enabled.");
+                    compacted = compactor.compactDown(base, after, hardCanceller, softCanceller);
+                } else {
+                    compacted = compactor.compactUp(base, after, hardCanceller);
                 }
 
-                context.getGCListener().info("compaction cycle {} completed in {}. Compacted {} against {} to {}",
-                    cycles, cycleWatch, head.getRecordId(), previousHead.getRecordId(), compacted.getRecordId());
-                previousHead = head;
-            }
+                if (compacted == null) {
+                    context.getGCListener().warn("compaction cancelled: {}.",
+                            hardCanceller.check().getReason().orElse("unknown reason"));
+                    return compactionAborted(context, targetGeneration);
+                }
+
+                context.getGCListener().info("compaction cycle 0 completed in {}. Compacted {} to {}",
+                        watch, head.getRecordId(), compacted.getRecordId());
+
+                cycles = 0;
+
+                while (!(success = setHead(context, head, compacted)) && cycles < retryCount) {
+                    // Some other concurrent changes have been made.
+                    // Rebase (and compact) those changes on top of the
+                    // compacted state before retrying to set the head.
+                    cycles++;
+                    context.getGCListener().info("compaction detected concurrent commits while compacting. " +
+                                    "Compacting these commits. Cycle {} of {}", cycles, retryCount);
+                    context.getGCListener().updateStatus(COMPACTION_RETRY.message() + cycles);
+                    PrintableStopwatch cycleWatch = PrintableStopwatch.createStarted();
+
+                    SegmentNodeState newHead = getHead(context);
+                    compacted = compactor.compact(head, newHead, compacted, hardCanceller);
+                    if (compacted == null) {
+                        context.getGCListener().warn("compaction cancelled: {}.",
+                                hardCanceller.check().getReason().orElse("unknown reason"));
+                        return compactionAborted(context, targetGeneration);
+                    }
+
+                    context.getGCListener().info("compaction cycle {} completed in {}. Compacted {} against {} to {}",
+                            cycles, cycleWatch, head.getRecordId(), newHead.getRecordId(), compacted.getRecordId());
+                    head = newHead;
+                }
+
+                if (success) {
+                    flusher.flush();
+                }
+            } while (success && !compacted.isComplete() && !softCanceller.check().isCancelled());
 
             if (!success) {
                 context.getGCListener().info("compaction gave up compacting concurrent commits after {} cycles.", cycles);
@@ -194,12 +241,13 @@
 
                     cycles++;
 
-                    Canceller forcedCompactionCanceller = compactionCanceller
+                    Canceller forcedCompactionCanceller = hardCanceller
                         .withTimeout("forced compaction timeout exceeded", forceTimeout, SECONDS)
                         .withShortCircuit();
-                    compacted = forceCompact(context, previousHead, compacted, compactor, forcedCompactionCanceller);
-                    success = compacted != null;
-                    if (success) {
+                    compacted = forceCompact(context, head, compacted, compactor, forcedCompactionCanceller);
+                    if (compacted != null) {
+                        success = true;
+                        flusher.flush();
                         context.getGCListener().info("compaction succeeded to force compact remaining commits after {}.", forceWatch);
                     } else {
                         Cancellation cancellation = forcedCompactionCanceller.check();
@@ -219,38 +267,40 @@
             if (success) {
                 // Update type of the last compaction before calling methods that could throw an exception.
                 context.getSuccessfulCompactionListener().onSuccessfulCompaction(getCompactionType());
-                writer.flush();
-                context.getFlusher().flush();
-                context.getGCListener().info("compaction succeeded in {}, after {} cycles", watch, cycles);
                 context.getCompactionMonitor().finished();
-                return compactionSucceeded(context, nextGeneration, compacted.getRecordId());
+
+                if (compacted.isComplete()) {
+                    context.getGCListener().info("compaction succeeded in {}, after {} cycles", watch, cycles);
+                    return compactionSucceeded(context, targetGeneration, compacted.getRecordId());
+                } else {
+                    context.getGCListener().info("compaction partially succeeded in {}: {}.",
+                            watch, softCanceller.check().getReason().orElse("unknown reason"));
+                    return compactionPartiallySucceeded(context, partialGeneration, compacted.getRecordId());
+                }
             } else {
                 context.getGCListener().info("compaction failed after {}, and {} cycles", watch, cycles);
-                return compactionAborted(context, nextGeneration);
+                return compactionAborted(context, targetGeneration);
             }
         } catch (InterruptedException e) {
             context.getGCListener().error("compaction interrupted", e);
             currentThread().interrupt();
-            return compactionAborted(context, nextGeneration);
+            return compactionAborted(context, targetGeneration);
         } catch (Throwable e) {
             context.getGCListener().error("compaction encountered an error", e instanceof Exception ? (Exception) e : new Exception(e));
-            return compactionAborted(context, nextGeneration);
+            return compactionAborted(context, targetGeneration);
         }
     }
 
-    private Compactor newCompactor(Context context, SegmentWriter writer) {
+    private Compactor newCompactor(Context context, CompactionWriter writer) {
         CompactorType compactorType = context.getGCOptions().getCompactorType();
         switch (compactorType) {
             case PARALLEL_COMPACTOR:
-                return new ParallelCompactor(context.getGCListener(), context.getSegmentReader(), writer,
-                        context.getBlobStore(), context.getCompactionMonitor(),
+                return new ParallelCompactor(context.getGCListener(), writer, context.getCompactionMonitor(),
                         context.getGCOptions().getConcurrency());
             case CHECKPOINT_COMPACTOR:
-                return new CheckpointCompactor(context.getGCListener(), context.getSegmentReader(), writer,
-                        context.getBlobStore(), context.getCompactionMonitor());
+                return new CheckpointCompactor(context.getGCListener(), writer, context.getCompactionMonitor());
             case CLASSIC_COMPACTOR:
-                return new ClassicCompactor(context.getSegmentReader(), writer, context.getBlobStore(),
-                        context.getCompactionMonitor());
+                return new ClassicCompactor(writer, context.getCompactionMonitor());
             default:
                 throw new IllegalArgumentException("Unknown compactor type: " + compactorType);
             }
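
The rewritten compact loop above interleaves three concerns: compact the current head (compactDown when soft cancellation or intermediate state saves are enabled, otherwise compactUp), retry up to retryCount times to swing the head over concurrent commits, and flush and repeat while the result is only partially compacted. That control flow can be sketched independently of Oak; every type in the following Java sketch is a hypothetical simplification of the interfaces used in the strategy, not the real API.

// Control-flow sketch of the incremental compaction loop; all types are hypothetical simplifications.
final class CompactionLoopSketch {

    interface State { boolean isComplete(); }

    interface Store {
        State head();
        boolean casHead(State expected, State update);  // atomically swing the head
        void flush();
    }

    interface Compactor {
        State compactDown(State base, State after);     // may stop early and return a partial result
        State rebase(State from, State to, State onto); // compact concurrent commits onto the result
    }

    static boolean run(Store store, Compactor compactor, State base, int retryCount) {
        State compacted = null;
        boolean success;
        do {
            State head = store.head();
            compacted = compactor.compactDown(base, compacted == null ? head : compacted);
            if (compacted == null) {
                return false;                            // hard-cancelled or failed
            }
            int cycles = 0;
            while (!(success = store.casHead(head, compacted)) && cycles < retryCount) {
                cycles++;                                // concurrent commits: rebase and retry
                State newHead = store.head();
                compacted = compactor.rebase(head, newHead, compacted);
                if (compacted == null) {
                    return false;
                }
                head = newHead;
            }
            if (success) {
                store.flush();                           // persist before a possible next increment
            }
        } while (success && !compacted.isComplete());    // repeat until fully compacted
        return success;
    }
}
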
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractGarbageCollectionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractGarbageCollectionStrategy.java
index 0f418be..6d219c5 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractGarbageCollectionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/AbstractGarbageCollectionStrategy.java
@@ -24,12 +24,14 @@
 
 import java.io.IOException;
 import java.util.List;
+import java.util.function.Supplier;
 
 import org.apache.jackrabbit.guava.common.base.Predicate;
 import org.apache.jackrabbit.oak.segment.Revisions;
 import org.apache.jackrabbit.oak.segment.SegmentCache;
 import org.apache.jackrabbit.oak.segment.SegmentReader;
 import org.apache.jackrabbit.oak.segment.SegmentTracker;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
 import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
@@ -151,7 +153,7 @@
         return context.getRevisions().getHead().getSegmentId().getGcGeneration();
     }
 
-    private List<String> cleanup(Context context, CompactionResult compactionResult) throws IOException {
+    public List<String> cleanup(Context context, CompactionResult compactionResult) throws IOException {
         return getCleanupStrategy().cleanup(newCleanupStrategyContext(context, compactionResult));
     }
 
@@ -230,11 +232,21 @@
             }
 
             @Override
-            public Canceller getCanceller() {
+            public Canceller getHardCanceller() {
                 return context.getCanceller();
             }
 
             @Override
+            public Canceller getSoftCanceller() {
+                return Canceller.newCanceller();
+            }
+
+            @Override
+            public Supplier<Canceller> getStateSaveTriggerSupplier() {
+                return Canceller::newCanceller;
+            }
+
+            @Override
             public int getGCCount() {
                 return context.getGCCount();
             }
@@ -282,7 +294,7 @@
 
             @Override
             public GCJournal getGCJournal() {
-                return context.getGCJournal();
+                return compactionResult.requiresGCJournalEntry() ? context.getGCJournal() : null;
             }
 
             @Override
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CleanupFirstCompactionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CleanupFirstCompactionStrategy.java
index 07c5e2c..c4150f7 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CleanupFirstCompactionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CleanupFirstCompactionStrategy.java
@@ -19,20 +19,13 @@
 
 package org.apache.jackrabbit.oak.segment.file;
 
-import static org.apache.jackrabbit.guava.common.collect.Sets.newHashSet;
-import static org.apache.jackrabbit.oak.segment.SegmentId.isDataSegmentId;
 import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCStatus.CLEANUP;
 import static org.apache.jackrabbit.oak.segment.file.PrintableBytes.newPrintableBytes;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.List;
-import java.util.Set;
-import java.util.UUID;
 
 import org.apache.jackrabbit.guava.common.base.Joiner;
-import org.apache.jackrabbit.guava.common.base.Predicate;
-import org.apache.jackrabbit.oak.segment.SegmentId;
 import org.apache.jackrabbit.oak.segment.file.tar.CleanupContext;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
@@ -76,46 +69,6 @@
 
         PrintableStopwatch watch = PrintableStopwatch.createStarted();
 
-        Predicate<GCGeneration> reclaimer;
-
-        GCGeneration currentGeneration = context.getRevisions().getHead().getSegmentId().getGcGeneration();
-
-        switch (context.getGCOptions().getGCType()) {
-            case FULL:
-                reclaimer = generation -> {
-                    if (generation == null) {
-                        return false;
-                    }
-                    if (generation.getFullGeneration() < currentGeneration.getFullGeneration()) {
-                        return true;
-                    }
-                    if (generation.getFullGeneration() > currentGeneration.getFullGeneration()) {
-                        return true;
-                    }
-                    return generation.getGeneration() < currentGeneration.getGeneration() && !generation.isCompacted();
-                };
-                break;
-            case TAIL:
-                reclaimer = generation -> {
-                    if (generation == null) {
-                        return false;
-                    }
-                    if (generation.getFullGeneration() < currentGeneration.getFullGeneration() - 1) {
-                        return true;
-                    }
-                    if (generation.getFullGeneration() == currentGeneration.getFullGeneration() - 1) {
-                        return !generation.isCompacted();
-                    }
-                    if (generation.getFullGeneration() > currentGeneration.getFullGeneration()) {
-                        return true;
-                    }
-                    return generation.getGeneration() < currentGeneration.getGeneration() && !generation.isCompacted();
-                };
-                break;
-            default:
-                throw new IllegalArgumentException("invalid garbage collection type");
-        }
-
         context.getGCListener().info("pre-compaction cleanup started");
         context.getGCListener().updateStatus(CLEANUP.message());
 
@@ -124,7 +77,7 @@
 
         System.gc();
 
-        TarFiles.CleanupResult cleanupResult = context.getTarFiles().cleanup(newCleanupContext(context, reclaimer));
+        TarFiles.CleanupResult cleanupResult = context.getTarFiles().cleanup(newCleanupContext(context));
 
         if (cleanupResult.isInterrupted()) {
             context.getGCListener().info("cleanup interrupted");
@@ -149,39 +102,37 @@
         return strategy.compact(context);
     }
 
-    private static CleanupContext newCleanupContext(Context context, Predicate<GCGeneration> old) {
-        return new CleanupContext() {
+    private static CleanupContext newCleanupContext(Context context) {
+        GCGeneration currentGeneration = context.getRevisions().getHead().getSegmentId().getGcGeneration();
+        String compactedRoot = context.getGCJournal().read().getRoot();
 
-            private boolean isUnreferencedBulkSegment(UUID id, boolean referenced) {
-                return !isDataSegmentId(id.getLeastSignificantBits()) && !referenced;
-            }
-
-            private boolean isOldDataSegment(UUID id, GCGeneration generation) {
-                return isDataSegmentId(id.getLeastSignificantBits()) && old.apply(generation);
-            }
-
-            @Override
-            public Collection<UUID> initialReferences() {
-                Set<UUID> references = newHashSet();
-                for (SegmentId id : context.getSegmentTracker().getReferencedSegmentIds()) {
-                    if (id.isBulkSegmentId()) {
-                        references.add(id.asUUID());
+        switch (context.getGCOptions().getGCType()) {
+            case FULL:
+                return new DefaultCleanupContext(context.getSegmentTracker(), generation -> {
+                    if (generation == null) {
+                        return false;
                     }
-                }
-                return references;
-            }
-
-            @Override
-            public boolean shouldReclaim(UUID id, GCGeneration generation, boolean referenced) {
-                return isUnreferencedBulkSegment(id, referenced) || isOldDataSegment(id, generation);
-            }
-
-            @Override
-            public boolean shouldFollow(UUID from, UUID to) {
-                return !isDataSegmentId(to.getLeastSignificantBits());
-            }
-
-        };
+                    if (generation.getFullGeneration() < currentGeneration.getFullGeneration()) {
+                        return true;
+                    }
+                    return generation.getGeneration() < currentGeneration.getGeneration() && !generation.isCompacted();
+                }, compactedRoot);
+            case TAIL:
+                return new DefaultCleanupContext(context.getSegmentTracker(), generation -> {
+                    if (generation == null) {
+                        return false;
+                    }
+                    if (generation.getFullGeneration() < currentGeneration.getFullGeneration() - 1) {
+                        return true;
+                    }
+                    if (generation.getFullGeneration() == currentGeneration.getFullGeneration() - 1) {
+                        return !generation.isCompacted();
+                    }
+                    return generation.getGeneration() < currentGeneration.getGeneration() && !generation.isCompacted();
+                }, compactedRoot);
+            default:
+                throw new IllegalArgumentException("invalid garbage collection type");
+        }
     }
 
     private static String toFileNames(@NotNull List<String> files) {
@@ -191,13 +142,4 @@
             return Joiner.on(",").join(files);
         }
     }
-
-    private static GCGeneration getGcGeneration(Context context) {
-        return context.getRevisions().getHead().getSegmentId().getGcGeneration();
-    }
-
-    private static long size(Context context) {
-        return context.getTarFiles().size();
-    }
-
 }
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactedNodeState.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactedNodeState.java
new file mode 100644
index 0000000..de31888
--- /dev/null
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactedNodeState.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.SegmentNodeState;
+import org.apache.jackrabbit.oak.segment.SegmentReader;
+import org.apache.jackrabbit.oak.segment.SegmentWriter;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Simple wrapper class for {@code SegmentNodeState} to keep track of fully and partially compacted nodes.
+ */
+public abstract class CompactedNodeState extends SegmentNodeState {
+    final boolean complete;
+
+    private CompactedNodeState(
+            @NotNull SegmentReader reader,
+            @NotNull SegmentWriter writer,
+            @Nullable BlobStore blobStore,
+            @NotNull RecordId id,
+            boolean complete) {
+        super(reader, writer, blobStore, id);
+        this.complete = complete;
+    }
+
+    public final boolean isComplete() {
+        return complete;
+    }
+
+    static final class FullyCompactedNodeState extends CompactedNodeState {
+        FullyCompactedNodeState(
+                @NotNull SegmentReader reader,
+                @NotNull SegmentWriter writer,
+                @Nullable BlobStore blobStore,
+                @NotNull RecordId id) {
+            super(reader, writer, blobStore, id, true);
+        }
+    }
+
+    static final class PartiallyCompactedNodeState extends CompactedNodeState {
+        PartiallyCompactedNodeState(
+                @NotNull SegmentReader reader,
+                @NotNull SegmentWriter writer,
+                @Nullable BlobStore blobStore,
+                @NotNull RecordId id) {
+            super(reader, writer, blobStore, id, false);
+        }
+    }
+}
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionResult.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionResult.java
index f2a447b..4de088e 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionResult.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionResult.java
@@ -74,6 +74,41 @@
             RecordId getCompactedRootId() {
                 return compactedRootId;
             }
+
+            @Override
+            boolean requiresGCJournalEntry() {
+                return true;
+            }
+        };
+    }
+
+    /**
+     * Result of an incremental compaction whose compaction cycle did not complete.
+     *
+     * @param newGeneration   the generation successfully created by compaction
+     * @param compactedRootId the record id of the root created by compaction
+     */
+    static CompactionResult partiallySucceeded(
+            @NotNull GCGeneration newGeneration,
+            @NotNull final RecordId compactedRootId,
+            int gcCount
+    ) {
+        return new CompactionResult(newGeneration, gcCount) {
+
+            @Override
+            Predicate<GCGeneration> reclaimer() {
+                return generation -> false;
+            }
+
+            @Override
+            boolean isSuccess() {
+                return true;
+            }
+
+            @Override
+            RecordId getCompactedRootId() {
+                return compactedRootId;
+            }
         };
     }
 
@@ -81,19 +116,16 @@
      * Result of an aborted compaction.
      *
      * @param currentGeneration the current generation of the store
-     * @param failedGeneration  the generation that compaction attempted to
-     *                          create
      */
     static CompactionResult aborted(
         @NotNull GCGeneration currentGeneration,
-        @NotNull final GCGeneration failedGeneration,
         int gcCount
     ) {
         return new CompactionResult(currentGeneration, gcCount) {
 
             @Override
             Predicate<GCGeneration> reclaimer() {
-                return Reclaimers.newExactReclaimer(failedGeneration);
+                return Reclaimers.newEmptyReclaimer();
             }
 
             @Override
@@ -181,6 +213,10 @@
         return false;
     }
 
+    boolean requiresGCJournalEntry() {
+        return false;
+    }
+
     /**
      * @return a diagnostic message describing the outcome of this compaction.
      */
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategy.java
index 73109b2..b7e0784 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategy.java
@@ -20,10 +20,12 @@
 package org.apache.jackrabbit.oak.segment.file;
 
 import java.io.IOException;
+import java.util.function.Supplier;
 
 import org.apache.jackrabbit.oak.segment.Revisions;
 import org.apache.jackrabbit.oak.segment.SegmentReader;
 import org.apache.jackrabbit.oak.segment.SegmentTracker;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
 import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
@@ -51,7 +53,11 @@
 
         BlobStore getBlobStore();
 
-        Canceller getCanceller();
+        Canceller getHardCanceller();
+
+        Canceller getSoftCanceller();
+
+        Supplier<Canceller> getStateSaveTriggerSupplier();
 
         int getGCCount();
 
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionWriter.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionWriter.java
new file mode 100644
index 0000000..cd20a11
--- /dev/null
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/CompactionWriter.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.commons.Buffer;
+import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.SegmentNodeState;
+import org.apache.jackrabbit.oak.segment.SegmentReader;
+import org.apache.jackrabbit.oak.segment.SegmentWriter;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.IOException;
+
+import static org.apache.jackrabbit.oak.segment.file.CompactedNodeState.PartiallyCompactedNodeState;
+import static org.apache.jackrabbit.oak.segment.file.CompactedNodeState.FullyCompactedNodeState;
+
+/**
+ * The CompactionWriter delegates node writes to the correct {@code SegmentWriter} based on the GC generation.
+ */
+public class CompactionWriter {
+    private final @NotNull SegmentReader reader;
+    private final @Nullable BlobStore blobStore;
+    private final @NotNull GCIncrement gcIncrement;
+    private final @NotNull SegmentWriter partialWriter;
+    private final @NotNull SegmentWriter targetWriter;
+
+    public CompactionWriter(
+            @NotNull SegmentReader reader,
+            @Nullable BlobStore blobStore,
+            @NotNull GCGeneration generation,
+            @NotNull SegmentWriter segmentWriter) {
+        this.reader = reader;
+        this.blobStore = blobStore;
+        this.gcIncrement = new GCIncrement(generation, generation, generation);
+        this.partialWriter = segmentWriter;
+        this.targetWriter = segmentWriter;
+    }
+
+    public CompactionWriter(
+            @NotNull SegmentReader reader,
+            @Nullable BlobStore blobStore,
+            @NotNull GCIncrement gcIncrement,
+            @NotNull SegmentWriterFactory segmentWriterFactory) {
+        this.reader = reader;
+        this.blobStore = blobStore;
+        this.gcIncrement = gcIncrement;
+        this.partialWriter = gcIncrement.createPartialWriter(segmentWriterFactory);
+        this.targetWriter = gcIncrement.createTargetWriter(segmentWriterFactory);
+    }
+
+    public @NotNull FullyCompactedNodeState writeFullyCompactedNode(
+            @NotNull NodeState nodeState,
+            @Nullable Buffer stableId
+    ) throws IOException {
+        RecordId nodeId = targetWriter.writeNode(nodeState, stableId);
+        return new FullyCompactedNodeState(reader, targetWriter, blobStore, nodeId);
+    }
+
+    public @Nullable PartiallyCompactedNodeState writePartiallyCompactedNode(
+            @NotNull NodeState nodeState,
+            @Nullable Buffer stableId
+    ) throws IOException {
+        RecordId nodeId = partialWriter.writeNode(nodeState, stableId);
+        return new PartiallyCompactedNodeState(reader, partialWriter, blobStore, nodeId);
+    }
+
+    public void flush() throws IOException {
+        partialWriter.flush();
+        targetWriter.flush();
+    }
+
+    public @Nullable FullyCompactedNodeState getPreviouslyCompactedState(NodeState nodeState) {
+        if (!(nodeState instanceof SegmentNodeState)) {
+            return null;
+        }
+        SegmentNodeState segmentNodeState = (SegmentNodeState) nodeState;
+        if (!gcIncrement.isFullyCompacted(segmentNodeState.getGcGeneration())) {
+            return null;
+        }
+        RecordId nodeId = segmentNodeState.getRecordId();
+        return new FullyCompactedNodeState(reader, targetWriter, blobStore, nodeId);
+    }
+}
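
Based only on the signatures introduced above, a compactor is expected to first ask the CompactionWriter for an already fully compacted node and otherwise write through the target- or partial-generation writer. The helper below is a hypothetical usage sketch (the compactNode method and its complete flag are not part of this patch), shown to make the intended call pattern explicit.

import java.io.IOException;

import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
import org.apache.jackrabbit.oak.spi.state.NodeState;

// Hypothetical usage sketch of CompactionWriter; not actual Oak code.
final class CompactionWriterUsageSketch {

    static CompactedNodeState compactNode(CompactionWriter writer, NodeState node,
                                          Buffer stableId, boolean complete) throws IOException {
        CompactedNodeState previous = writer.getPreviouslyCompactedState(node);
        if (previous != null) {
            return previous;                                      // already in the target generation
        }
        if (complete) {
            return writer.writeFullyCompactedNode(node, stableId);    // target-generation writer
        }
        return writer.writePartiallyCompactedNode(node, stableId);    // partial-generation writer
    }
}
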
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupContext.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupContext.java
new file mode 100644
index 0000000..388247d
--- /dev/null
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupContext.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.SegmentId;
+import org.apache.jackrabbit.oak.segment.SegmentTracker;
+import org.apache.jackrabbit.oak.segment.file.tar.CleanupContext;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static org.apache.jackrabbit.oak.segment.SegmentId.isDataSegmentId;
+
+class DefaultCleanupContext implements CleanupContext {
+    private final @NotNull SegmentTracker segmentTracker;
+    private final @NotNull Predicate<GCGeneration> old;
+    private final @Nullable UUID rootSegmentUUID;
+    private boolean aheadOfRoot;
+
+    DefaultCleanupContext(@NotNull SegmentTracker tracker, @NotNull Predicate<GCGeneration> old, @NotNull String compactedRoot) {
+        this.segmentTracker = tracker;
+        this.old = old;
+
+        RecordId rootId = RecordId.fromString(tracker, compactedRoot);
+        if (rootId.equals(RecordId.NULL)) {
+            rootSegmentUUID = null;
+            aheadOfRoot = false;
+        } else {
+            rootSegmentUUID = rootId.getSegmentId().asUUID();
+            aheadOfRoot = true;
+        }
+    }
+
+    /**
+     * Reference-based reclamation for bulk segments.
+     */
+    private boolean isUnreferencedBulkSegment(UUID id, boolean referenced) {
+        return !isDataSegmentId(id.getLeastSignificantBits()) && !referenced;
+    }
+
+    /**
+     * Generational reclamation for data segments.
+     */
+    private boolean isOldDataSegment(UUID id, GCGeneration generation) {
+        return isDataSegmentId(id.getLeastSignificantBits()) && old.test(generation);
+    }
+
+    /**
+     * Special reclamation for unused future segments. An aborted compaction leaves behind persisted but unused
+     * TAR entries whose generation is higher than the root's and whose compacted flag is set. With incremental
+     * compaction, a purely generational approach to this cleanup is no longer feasible, because segments of a
+     * higher generation than the root may be part of a valid repository tree. Observation: compacted segments
+     * are unused iff they were persisted after the last compacted root. This context relies on the cleanup
+     * algorithm marking TAR entries in reverse order and considers each compacted segment reclaimable until
+     * the root has been encountered, i.e. as long as {@code aheadOfRoot} is true.
+     */
+    private boolean isDanglingFutureSegment(UUID id, GCGeneration generation) {
+        return (aheadOfRoot &= !id.equals(rootSegmentUUID)) && generation.isCompacted();
+    }
+
+    /**
+     * Returns IDs of directly referenced segments. Since reference-based reclamation
+     * is only used for bulk segments, data segment IDs are filtered out.
+     */
+    @Override
+    public Set<UUID> initialReferences() {
+        return segmentTracker.getReferencedSegmentIds().stream()
+                .filter(SegmentId::isBulkSegmentId)
+                .map(SegmentId::asUUID)
+                .collect(Collectors.toSet());
+    }
+
+    @Override
+    public boolean shouldReclaim(UUID id, GCGeneration generation, boolean referenced) {
+        return isDanglingFutureSegment(id, generation) || isUnreferencedBulkSegment(id, referenced) ||
+                isOldDataSegment(id, generation);
+    }
+
+    @Override
+    public boolean shouldFollow(UUID from, UUID to) {
+        return !isDataSegmentId(to.getLeastSignificantBits());
+    }
+}
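
The aheadOfRoot bookkeeping only works because TAR entries are visited newest-first: every segment carrying the compacted flag is treated as a dangling future segment until the compacted root's own segment is seen, after which only the generation-based rules apply. The following self-contained model of that marking order uses plain UUIDs and booleans as simplified stand-ins for Oak's types.

import java.util.List;
import java.util.UUID;

// Stand-alone model of the reverse-order marking behind isDanglingFutureSegment; not Oak code.
final class DanglingFutureSegmentSketch {

    static final class Segment {
        final UUID id;
        final boolean compacted;  // stand-in for GCGeneration.isCompacted()
        Segment(UUID id, boolean compacted) { this.id = id; this.compacted = compacted; }
    }

    private boolean aheadOfRoot = true;
    private final UUID rootSegmentId;

    DanglingFutureSegmentSketch(UUID rootSegmentId) { this.rootSegmentId = rootSegmentId; }

    // Mirrors: return (aheadOfRoot &= !id.equals(rootSegmentUUID)) && generation.isCompacted();
    boolean isDanglingFutureSegment(Segment segment) {
        aheadOfRoot = aheadOfRoot && !segment.id.equals(rootSegmentId);
        return aheadOfRoot && segment.compacted;
    }

    public static void main(String[] args) {
        UUID root = UUID.randomUUID();
        UUID orphan = UUID.randomUUID();  // written after the root by an aborted compaction
        UUID older = UUID.randomUUID();   // compacted segment referenced by the root

        // TAR entries are visited newest-first, so the orphan comes before the root.
        List<Segment> newestFirst = List.of(
                new Segment(orphan, true),
                new Segment(root, true),
                new Segment(older, true));

        DanglingFutureSegmentSketch sketch = new DanglingFutureSegmentSketch(root);
        for (Segment s : newestFirst) {
            System.out.println(s.id + " dangling=" + sketch.isDanglingFutureSegment(s));
        }
        // Only the orphan (ahead of the root) is flagged as dangling.
    }
}
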
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupStrategy.java
index 9e202a2..8cb636d 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/DefaultCleanupStrategy.java
@@ -19,20 +19,14 @@
 
 package org.apache.jackrabbit.oak.segment.file;
 
-import static org.apache.jackrabbit.guava.common.collect.Sets.newHashSet;
-import static org.apache.jackrabbit.oak.segment.SegmentId.isDataSegmentId;
 import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCStatus.CLEANUP;
 import static org.apache.jackrabbit.oak.segment.file.PrintableBytes.newPrintableBytes;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.List;
-import java.util.Set;
-import java.util.UUID;
 
 import org.apache.jackrabbit.guava.common.base.Joiner;
 import org.apache.jackrabbit.guava.common.base.Predicate;
-import org.apache.jackrabbit.oak.segment.SegmentId;
 import org.apache.jackrabbit.oak.segment.file.tar.CleanupContext;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
@@ -52,7 +46,7 @@
         // to clear stale weak references in the SegmentTracker
         System.gc();
 
-        TarFiles.CleanupResult cleanupResult = context.getTarFiles().cleanup(newCleanupContext(context, context.getReclaimer()));
+        TarFiles.CleanupResult cleanupResult = context.getTarFiles().cleanup(newCleanupContext(context));
         if (cleanupResult.isInterrupted()) {
             context.getGCListener().info("cleanup interrupted");
         }
@@ -62,13 +56,17 @@
         long finalSize = size(context);
         long reclaimedSize = cleanupResult.getReclaimedSize();
         context.getFileStoreStats().reclaimed(reclaimedSize);
-        context.getGCJournal().persist(
-            reclaimedSize,
-            finalSize,
-            getGcGeneration(context),
-            context.getCompactionMonitor().getCompactedNodes(),
-            context.getCompactedRootId()
-        );
+
+        GCJournal gcJournal = context.getGCJournal();
+        if (gcJournal != null) {
+            gcJournal.persist(
+                    reclaimedSize,
+                    finalSize,
+                    getGcGeneration(context),
+                    context.getCompactionMonitor().getCompactedNodes(),
+                    context.getCompactedRootId()
+            );
+        }
         context.getGCListener().cleaned(reclaimedSize, finalSize);
         context.getGCListener().info(
             "cleanup completed in {}. Post cleanup size is {} and space reclaimed {}.",
@@ -79,40 +77,9 @@
         return cleanupResult.getRemovableFiles();
     }
 
-    private static CleanupContext newCleanupContext(Context context, Predicate<GCGeneration> old) {
-        return new CleanupContext() {
-
-            private boolean isUnreferencedBulkSegment(UUID id, boolean referenced) {
-                return !isDataSegmentId(id.getLeastSignificantBits()) && !referenced;
-            }
-
-            private boolean isOldDataSegment(UUID id, GCGeneration generation) {
-                return isDataSegmentId(id.getLeastSignificantBits()) && old.apply(generation);
-            }
-
-            @Override
-            public Collection<UUID> initialReferences() {
-                Set<UUID> references = newHashSet();
-                for (SegmentId id : context.getSegmentTracker().getReferencedSegmentIds()) {
-                    if (id.isBulkSegmentId()) {
-                        references.add(id.asUUID());
-                    }
-                }
-                return references;
-            }
-
-            @Override
-            public boolean shouldReclaim(UUID id, GCGeneration generation, boolean referenced) {
-                return isUnreferencedBulkSegment(id, referenced) || isOldDataSegment(id, generation)
-                        || generation.getFullGeneration() > getGcGeneration(context).getFullGeneration();
-            }
-
-            @Override
-            public boolean shouldFollow(UUID from, UUID to) {
-                return !isDataSegmentId(to.getLeastSignificantBits());
-            }
-
-        };
+    private static CleanupContext newCleanupContext(Context context) {
+        return new DefaultCleanupContext(context.getSegmentTracker(), context.getReclaimer(),
+                context.getCompactedRootId());
     }
 
     private static String toFileNames(@NotNull List<String> files) {
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategy.java
index 342317f..237dbce 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategy.java
@@ -33,7 +33,12 @@
     }
 
     @Override
-    GCGeneration nextGeneration(GCGeneration current) {
+    GCGeneration partialGeneration(GCGeneration current) {
+        return current.nextPartial();
+    }
+
+    @Override
+    GCGeneration targetGeneration(GCGeneration current) {
         return current.nextFull();
     }
 
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCIncrement.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCIncrement.java
new file mode 100644
index 0000000..8f925ad
--- /dev/null
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCIncrement.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.SegmentWriter;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Utility class to keep track of generations for incremental compaction.
+ */
+public class GCIncrement {
+    private final @NotNull GCGeneration baseGeneration;
+    private final @NotNull GCGeneration partialGeneration;
+    private final @NotNull GCGeneration targetGeneration;
+
+    public GCIncrement(@NotNull GCGeneration base, @NotNull GCGeneration partial, @NotNull GCGeneration target) {
+        baseGeneration = base;
+        partialGeneration = partial;
+        targetGeneration = target;
+    }
+
+    @NotNull SegmentWriter createPartialWriter(@NotNull SegmentWriterFactory factory) {
+        return factory.newSegmentWriter(partialGeneration);
+    }
+
+    @NotNull SegmentWriter createTargetWriter(@NotNull SegmentWriterFactory factory) {
+        return factory.newSegmentWriter(targetGeneration);
+    }
+
+    /**
+     * Compaction may be used to copy a repository to the same generation as before.
+     * Therefore, a segment is only considered fully compacted if its generation is strictly newer
+     * than the base generation, in addition to matching the target generation.
+     */
+    boolean isFullyCompacted(GCGeneration generation) {
+        return (generation.compareWith(baseGeneration) > 0) && generation.equals(targetGeneration);
+    }
+
+    @Override
+    public String toString() {
+        return "GCIncrement{\n" +
+                "  base:    " + baseGeneration    + "\n" +
+                "  partial: " + partialGeneration + "\n" +
+                "  target:  " + targetGeneration  + "\n" +
+                "}";
+    }
+}
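
A hedged usage sketch for the new GCIncrement above, assuming the surrounding class lives in org.apache.jackrabbit.oak.segment.file (the writer helpers are package-private) and that a SegmentWriterFactory and base generation are supplied by the caller:

import org.apache.jackrabbit.oak.segment.SegmentWriter;
import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;

class GCIncrementSketch {
    static void example(SegmentWriterFactory writerFactory, GCGeneration base) {
        GCGeneration partial = base.nextPartial(); // compacted flag set, counters unchanged
        GCGeneration target = base.nextFull();     // generation and full generation incremented
        GCIncrement increment = new GCIncrement(base, partial, target);

        // Intermediate segments go to the partial generation, final ones to the target.
        SegmentWriter partialWriter = increment.createPartialWriter(writerFactory);
        SegmentWriter targetWriter = increment.createTargetWriter(writerFactory);

        // A segment only counts as fully compacted once its generation moved past the base.
        boolean done = increment.isFullyCompacted(target);
    }
}
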
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCNodeWriteMonitor.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCNodeWriteMonitor.java
index 7540e68..7613ddf 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCNodeWriteMonitor.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GCNodeWriteMonitor.java
@@ -48,17 +48,18 @@
     private long estimated = -1;
 
     /**
-     * Number of compacted nodes
+     * Number of compacted nodes. This is queried much more often than other properties,
+     * therefore it is the only one to use {@link AtomicLong} instead of {@link LongAdder}.
      */
     private long nodes;
 
     /**
-     * Number of compacted properties
+     * Number of compacted properties.
      */
     private long properties;
 
     /**
-     * Number of compacted binaries
+     * Number of compacted binaries.
      */
     private long binaries;
 
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
index 84f011a..eba6c6e 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollectionStrategy.java
@@ -27,6 +27,7 @@
 import org.apache.jackrabbit.oak.segment.SegmentCache;
 import org.apache.jackrabbit.oak.segment.SegmentReader;
 import org.apache.jackrabbit.oak.segment.SegmentTracker;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
 import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
index 375f0af..bbf471b 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/GarbageCollector.java
@@ -29,12 +29,14 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 
+import org.apache.jackrabbit.guava.common.base.Predicate;
 import org.apache.jackrabbit.guava.common.base.Supplier;
 import org.apache.jackrabbit.oak.segment.Revisions;
 import org.apache.jackrabbit.oak.segment.SegmentCache;
 import org.apache.jackrabbit.oak.segment.SegmentReader;
 import org.apache.jackrabbit.oak.segment.SegmentTracker;
 import org.apache.jackrabbit.oak.segment.SegmentWriter;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
 import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
 import org.apache.jackrabbit.oak.segment.file.GarbageCollectionStrategy.SuccessfulGarbageCollectionListener;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/Reclaimers.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/Reclaimers.java
index 08dfe10..fa04f43 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/Reclaimers.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/Reclaimers.java
@@ -36,6 +36,19 @@
     }
 
     /**
+     * Create a reclaimer that will never reclaim a segment.
+     */
+    static Predicate<GCGeneration> newEmptyReclaimer() {
+        return new Predicate<GCGeneration>() {
+
+            @Override
+            public boolean apply(GCGeneration generation) {
+                return false;
+            }
+        };
+    }
+
+    /**
      * Create a reclaimer for segments of old generations. Whether a segment is considered old and
      * thus reclaimable depends on the type of the most recent GC operation and the number of
      * retained generations.
@@ -137,7 +150,7 @@
     /**
      * Create an exact reclaimer. An exact reclaimer reclaims only segments of a single generation.
      * @param referenceGeneration  the generation to collect.
-     * @return  an new instance of an exact reclaimer for segments with their generation
+     * @return  a new instance of an exact reclaimer for segments with their generation
      *          matching {@code referenceGeneration}.
      */
     static Predicate<GCGeneration> newExactReclaimer(@NotNull final GCGeneration referenceGeneration) {
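
A small usage sketch for newEmptyReclaimer above, assuming the caller sits in the same package as Reclaimers: the predicate rejects every generation, so a cleanup driven by it keeps all segments.

import org.apache.jackrabbit.guava.common.base.Predicate;
import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;

class EmptyReclaimerSketch {
    static boolean wouldReclaim(GCGeneration generation) {
        Predicate<GCGeneration> keepEverything = Reclaimers.newEmptyReclaimer();
        return keepEverything.apply(generation); // always false: nothing is reclaimed
    }
}
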
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TailCompactionStrategy.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TailCompactionStrategy.java
index 8dce627..e4740c4 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TailCompactionStrategy.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TailCompactionStrategy.java
@@ -35,7 +35,12 @@
     }
 
     @Override
-    GCGeneration nextGeneration(GCGeneration current) {
+    GCGeneration partialGeneration(GCGeneration current) {
+        return current.nextPartial();
+    }
+
+    @Override
+    GCGeneration targetGeneration(GCGeneration current) {
         return current.nextTail();
     }
 
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/Canceller.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/Canceller.java
index dcd774e..e395149 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/Canceller.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/Canceller.java
@@ -44,7 +44,15 @@
     }
 
     Canceller() {
-        // Prevent instantiation outside of this package.
+        // Prevent instantiation outside this package.
+    }
+
+    /**
+     * Check if this instance can ever return anything other than {@code NOPE}.
+     * This will only be false for {@code ROOT} and short circuits thereof.
+     */
+    public boolean isCancelable() {
+        return (this != ROOT);
     }
 
     /**
@@ -91,10 +99,9 @@
      * returned instance will be canceled when this instance is canceled, but
      * will never transition back to an "uncanceled" state.
      *
-     * @return an new instance of {@link Canceller}.
+     * @return a new instance of {@link Canceller}.
      */
     public Canceller withShortCircuit() {
         return new ShortCircuitCanceller(this);
     }
-
 }
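
A brief sketch of how the new isCancelable() above is meant to be used, under the assumption that polling check() is comparatively expensive: callers holding the root canceller, which can never fire, can skip the cancellation bookkeeping entirely.

import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;

class CancellerSketch {
    // Poll check() only when this canceller can actually cancel.
    static boolean shouldStop(Canceller canceller) {
        return canceller.isCancelable() && canceller.check().isCancelled();
    }
}
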
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/ShortCircuitCanceller.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/ShortCircuitCanceller.java
index b486ba7..ce4df5e 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/ShortCircuitCanceller.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/cancel/ShortCircuitCanceller.java
@@ -32,8 +32,12 @@
     }
 
     @Override
+    public boolean isCancelable() {
+        return parent.isCancelable();
+    }
+
+    @Override
     public Cancellation check() {
         return cancellation.updateAndGet(prev -> prev != null && prev.isCancelled() ? prev : parent.check());
     }
-
 }
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/GCGeneration.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/GCGeneration.java
index bb3ad90..40e3008 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/GCGeneration.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/GCGeneration.java
@@ -21,7 +21,6 @@
 import static org.apache.jackrabbit.guava.common.base.Preconditions.checkNotNull;
 
 import org.apache.jackrabbit.guava.common.base.Objects;
-import org.apache.jackrabbit.oak.segment.file.tar.index.IndexEntry;
 import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentArchiveEntry;
 import org.jetbrains.annotations.NotNull;
 
@@ -89,7 +88,7 @@
 
     /**
      * Create a new instance with the generation and the full generation incremented by one
-     * and the compaction flag left unchanged.
+     * and the compaction flag set.
      */
     @NotNull
     public GCGeneration nextFull() {
@@ -97,8 +96,8 @@
     }
 
     /**
-     * Create a new instance with the generation incremented by one and the full
-     * generation and the compaction flag left unchanged.
+     * Create a new instance with the generation incremented by one, the full
+     * generation left unchanged and the compaction flag set.
      */
     @NotNull
     public GCGeneration nextTail() {
@@ -106,6 +105,15 @@
     }
 
     /**
+     * Create a new instance with the compaction flag set and the generation and the
+     * full generation left unchanged.
+     */
+    @NotNull
+    public GCGeneration nextPartial() {
+        return new GCGeneration(generation, fullGeneration, true);
+    }
+
+    /**
      * Create a new instance with the compaction flag unset and the generation and the
      * full generation left unchanged.
      */
@@ -155,8 +163,8 @@
     @Override
     public String toString() {
         return "GCGeneration{" +
-                "generation=" + generation + ',' +
-                "fullGeneration=" + fullGeneration +  ',' +
+                "generation=" + generation + ", " +
+                "fullGeneration=" + fullGeneration +  ", " +
                 "isCompacted=" + isCompacted + '}';
     }
 
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/TarReader.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/TarReader.java
index 436ffcd..47f0803 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/TarReader.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/tar/TarReader.java
@@ -34,6 +34,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.UUID;
@@ -244,7 +245,7 @@
                 SegmentArchiveReader reader = openStrategy.open(archiveManager, name);
                 if (reader != null) {
                     for (String other : archives) {
-                        if (other != name) {
+                        if (!Objects.equals(other, name)) {
                             log.info("Removing unused tar file {}", other);
                             archiveManager.delete(other);
                         }
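
The one-line TarReader fix above replaces reference inequality on archive names with a null-safe content comparison. A tiny illustration of why the old check could delete the archive that was just opened when the two strings are equal but not the same instance (the file name is made up):

import java.util.Objects;

class StringEqualitySketch {
    static void example() {
        String name = "data00000a.tar";
        String other = new String("data00000a.tar");     // equal content, different instance
        boolean oldCheck = (other != name);               // true: "other" looks different and would be deleted
        boolean newCheck = !Objects.equals(other, name);  // false: correctly recognized as the same name
    }
}
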
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
index 8ae3f84..a6aac0d 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorExternalBlobTest.java
@@ -20,6 +20,8 @@
 
 import static java.util.concurrent.TimeUnit.DAYS;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactor;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.addTestContent;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameRecord;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameStableId;
@@ -49,21 +51,28 @@
 import org.junit.Test;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
+@RunWith(Parameterized.class)
 public abstract class AbstractCompactorExternalBlobTest {
 
-    private TemporaryFolder folder = new TemporaryFolder(new File("target"));
+    private final TemporaryFolder folder = new TemporaryFolder(new File("target"));
 
-    private TemporaryBlobStore temporaryBlobStore = new TemporaryBlobStore(folder);
+    private final TemporaryBlobStore temporaryBlobStore = new TemporaryBlobStore(folder);
 
     private FileStore fileStore;
 
     private SegmentNodeStore nodeStore;
 
-    private Compactor compactor;
+    private SimpleCompactor simpleCompactor;
+
+    private final SimpleCompactorFactory compactorFactory;
 
     private GCGeneration compactedGeneration;
 
@@ -71,6 +80,19 @@
     public RuleChain rules = RuleChain.outerRule(folder)
         .around(temporaryBlobStore);
 
+    @Parameterized.Parameters
+    public static List<SimpleCompactorFactory> compactorFactories() {
+        return Arrays.asList(
+                compactor -> compactor::compactUp,
+                compactor -> (node, canceller) -> compactor.compactDown(node, canceller, canceller),
+                compactor -> (node, canceller) -> compactor.compact(EMPTY_NODE, node, EMPTY_NODE, canceller)
+        );
+    }
+
+    public AbstractCompactorExternalBlobTest(@NotNull SimpleCompactorFactory compactorFactory) {
+        this.compactorFactory = compactorFactory;
+    }
+
     public void setup(boolean withBlobStore) throws IOException, InvalidFileStoreVersionException {
         BlobStore blobStore = temporaryBlobStore.blobStore();
         FileStoreBuilder fileStoreBuilder = fileStoreBuilder(folder.getRoot());
@@ -82,7 +104,7 @@
         fileStore = fileStoreBuilder.build();
         nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
         compactedGeneration = newGCGeneration(1,1, true);
-        compactor = createCompactor(fileStore, compactedGeneration);
+        simpleCompactor = compactorFactory.newSimpleCompactor(createCompactor(fileStore, compactedGeneration));
     }
 
     protected abstract Compactor createCompactor(@NotNull FileStore fileStore, @NotNull GCGeneration generation);
@@ -117,7 +139,7 @@
         String cp5 = nodeStore.checkpoint(DAYS.toMillis(1));
 
         SegmentNodeState uncompacted1 = fileStore.getHead();
-        SegmentNodeState compacted1 = compactor.compact(EMPTY_NODE, uncompacted1, EMPTY_NODE, Canceller.newCanceller());
+        SegmentNodeState compacted1 = simpleCompactor.compact(uncompacted1, Canceller.newCanceller());
 
         assertNotNull(compacted1);
         assertNotSame(uncompacted1, compacted1);
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
index 7758948..298318e 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/AbstractCompactorTest.java
@@ -20,30 +20,45 @@
 
 import static java.util.concurrent.TimeUnit.DAYS;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactor;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.addTestContent;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameRecord;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.assertSameStableId;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.checkGeneration;
 import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.getCheckpoint;
 import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
-import static org.apache.jackrabbit.oak.segment.file.tar.GCGeneration.newGCGeneration;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.GCIncrement;
+import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
+import org.junit.runners.Parameterized;
 
 public abstract class AbstractCompactorTest {
+
     @Rule
     public TemporaryFolder folder = new TemporaryFolder(new File("target"));
 
@@ -53,17 +68,48 @@
 
     private Compactor compactor;
 
-    private GCGeneration compactedGeneration;
+    private SimpleCompactor simpleCompactor;
+
+    private final SimpleCompactorFactory compactorFactory;
+
+    private GCNodeWriteMonitor compactionMonitor;
+
+    private GCGeneration baseGeneration;
+    private GCGeneration partialGeneration;
+    private GCGeneration targetGeneration;
+
+    @Parameterized.Parameters
+    public static List<SimpleCompactorFactory> compactorFactories() {
+        return Arrays.asList(
+                compactor -> compactor::compactUp,
+                compactor -> (node, canceller) -> compactor.compactDown(node, canceller, canceller),
+                compactor -> (node, canceller) -> compactor.compact(EMPTY_NODE, node, EMPTY_NODE, canceller)
+        );
+    }
+
+    public AbstractCompactorTest(@NotNull SimpleCompactorFactory compactorFactory) {
+        this.compactorFactory = compactorFactory;
+    }
 
     @Before
     public void setup() throws IOException, InvalidFileStoreVersionException {
         fileStore = fileStoreBuilder(folder.getRoot()).build();
         nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
-        compactedGeneration = newGCGeneration(1,1, true);
-        compactor = createCompactor(fileStore, compactedGeneration);
+
+        baseGeneration = fileStore.getHead().getGcGeneration();
+        partialGeneration = baseGeneration.nextPartial();
+        targetGeneration = baseGeneration.nextFull();
+        GCIncrement increment = new GCIncrement(baseGeneration, partialGeneration, targetGeneration);
+
+        compactionMonitor = new GCNodeWriteMonitor(-1, GCMonitor.EMPTY);
+        compactor = createCompactor(fileStore, increment, compactionMonitor);
+        simpleCompactor = compactorFactory.newSimpleCompactor(compactor);
     }
 
-    protected abstract Compactor createCompactor(@NotNull FileStore fileStore, @NotNull GCGeneration generation);
+    protected abstract Compactor createCompactor(
+            @NotNull FileStore fileStore,
+            @NotNull GCIncrement increment,
+            @NotNull GCNodeWriteMonitor compactionMonitor);
 
     @After
     public void tearDown() {
@@ -78,10 +124,10 @@
         String cp2 = nodeStore.checkpoint(DAYS.toMillis(1));
 
         SegmentNodeState uncompacted1 = fileStore.getHead();
-        SegmentNodeState compacted1 = compactor.compact(EMPTY_NODE, uncompacted1, EMPTY_NODE, Canceller.newCanceller());
+        SegmentNodeState compacted1 = simpleCompactor.compact(uncompacted1, Canceller.newCanceller());
         assertNotNull(compacted1);
         assertNotSame(uncompacted1, compacted1);
-        checkGeneration(compacted1, compactedGeneration);
+        checkGeneration(compacted1, targetGeneration);
 
         assertSameStableId(uncompacted1, compacted1);
         assertSameStableId(getCheckpoint(uncompacted1, cp1), getCheckpoint(compacted1, cp1));
@@ -98,7 +144,7 @@
         SegmentNodeState compacted2 = compactor.compact(uncompacted1, uncompacted2, compacted1, Canceller.newCanceller());
         assertNotNull(compacted2);
         assertNotSame(uncompacted2, compacted2);
-        checkGeneration(compacted2, compactedGeneration);
+        checkGeneration(compacted2, targetGeneration);
 
         assertTrue(fileStore.getRevisions().setHead(uncompacted2.getRecordId(), compacted2.getRecordId()));
 
@@ -112,4 +158,46 @@
         assertSameRecord(getCheckpoint(compacted1, cp2), getCheckpoint(compacted2, cp2));
         assertSameRecord(getCheckpoint(compacted2, cp4), compacted2.getChildNode("root"));
     }
+
+    @Test
+    public void testHardCancellation() throws Exception {
+        for (int i = 1; i < 25; i++) {
+            addTestContent("cp" + i, nodeStore, 42);
+        }
+
+        Canceller canceller = Canceller.newCanceller()
+                .withCondition(null, () -> (compactionMonitor.getCompactedNodes() >= 10));
+
+        SegmentNodeState uncompacted1 = fileStore.getHead();
+        SegmentNodeState compacted1 = simpleCompactor.compact(uncompacted1, canceller);
+        assertNull(compacted1);
+    }
+
+    @Test
+    public void testSoftCancellation() throws Exception {
+        for (int i = 1; i < 25; i++) {
+            addTestContent("cp" + i, nodeStore, 42);
+        }
+
+        Canceller nullCanceller = Canceller.newCanceller();
+        Canceller softCanceller = Canceller.newCanceller()
+                .withCondition(null, () -> (compactionMonitor.getCompactedNodes() >= 10));
+
+        SegmentNodeState uncompacted1 = fileStore.getHead();
+        CompactedNodeState compacted1 = compactor.compactDown(uncompacted1, nullCanceller, softCanceller);
+        System.out.println(compactionMonitor.getCompactedNodes());
+
+        assertNotNull(compacted1);
+        assertFalse(compacted1.isComplete());
+        assertEquals(uncompacted1, compacted1);
+        checkGeneration(uncompacted1, baseGeneration);
+        assertNotEquals(targetGeneration, compacted1.getGcGeneration());
+
+        CompactedNodeState compacted2 = simpleCompactor.compact(compacted1, nullCanceller);
+        System.out.println(compactionMonitor.getCompactedNodes());
+
+        assertNotNull(compacted2);
+        assertTrue(compacted2.isComplete());
+        checkGeneration(compacted2, targetGeneration);
+    }
 }
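
The two new tests above exercise the hard/soft cancellation split of compactDown: the hard canceller aborts compaction outright, while the soft canceller stops early but still yields a consistent partial state. A hedged sketch of that flow; the variable names and the immediately-true condition are illustrative:

import java.io.IOException;

import org.apache.jackrabbit.oak.segment.Compactor;
import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
import org.apache.jackrabbit.oak.spi.state.NodeState;

class CancellationSketch {
    // "compactor" is assumed to be an already configured Compactor instance.
    static void example(Compactor compactor, NodeState head) throws IOException {
        Canceller never = Canceller.newCanceller();
        Canceller stopNow = Canceller.newCanceller().withCondition("stop", () -> true);

        // Hard cancellation (second argument): compaction aborts and returns null.
        CompactedNodeState aborted = compactor.compactDown(head, stopNow, never);

        // Soft cancellation (third argument): compaction stops early but hands back a
        // consistent, incomplete intermediate state that a follow-up run can build on.
        CompactedNodeState partial = compactor.compactDown(head, never, stopNow);
        if (partial != null && !partial.isComplete()) {
            // e.g. continue with compactor.compactUp(partial, never)
        }
    }
}
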
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
index baca4d5..0bdfb39 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorExternalBlobTest.java
@@ -20,24 +20,26 @@
 
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 
+@RunWith(Parameterized.class)
 public class CheckpointCompactorExternalBlobTest extends AbstractCompactorExternalBlobTest {
+    public CheckpointCompactorExternalBlobTest(@NotNull SimpleCompactorFactory compactorFactory) {
+        super(compactorFactory);
+    }
+
     @Override
     protected CheckpointCompactor createCompactor(@NotNull FileStore fileStore, @NotNull GCGeneration generation) {
-        SegmentWriter writer = defaultSegmentWriterBuilder("c")
-                .withGeneration(generation)
-                .build(fileStore);
-
-        return new CheckpointCompactor(
-                GCMonitor.EMPTY,
-                fileStore.getReader(),
-                writer,
-                fileStore.getBlobStore(),
-                GCNodeWriteMonitor.EMPTY);
+        SegmentWriter writer = defaultSegmentWriterBuilder("c").withGeneration(generation).build(fileStore);
+        CompactionWriter compactionWriter = new CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), generation, writer);
+        return new CheckpointCompactor(GCMonitor.EMPTY, compactionWriter, GCNodeWriteMonitor.EMPTY);
     }
 }
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
index 95ea97f..4c4150a 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CheckpointCompactorTest.java
@@ -19,25 +19,33 @@
 package org.apache.jackrabbit.oak.segment;
 
 import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.GCIncrement;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
-import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 
+@RunWith(Parameterized.class)
 public class CheckpointCompactorTest extends AbstractCompactorTest {
+    public CheckpointCompactorTest(@NotNull SimpleCompactorFactory compactorFactory) {
+        super(compactorFactory);
+    }
+
     @Override
-    protected CheckpointCompactor createCompactor(@NotNull FileStore fileStore, @NotNull GCGeneration generation) {
-        SegmentWriter writer = defaultSegmentWriterBuilder("c")
+    protected CheckpointCompactor createCompactor(
+            @NotNull FileStore fileStore,
+            @NotNull GCIncrement increment,
+            @NotNull GCNodeWriteMonitor compactionMonitor
+    ) {
+        SegmentWriterFactory writerFactory = generation ->  defaultSegmentWriterBuilder("c")
                 .withGeneration(generation)
                 .build(fileStore);
-
-        return new CheckpointCompactor(
-                GCMonitor.EMPTY,
-                fileStore.getReader(),
-                writer,
-                fileStore.getBlobStore(),
-                GCNodeWriteMonitor.EMPTY);
+        CompactionWriter compactionWriter = new CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), increment, writerFactory);
+        return new CheckpointCompactor(GCMonitor.EMPTY, compactionWriter, compactionMonitor);
     }
 }
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
index d728c77..fc922af 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ClassicCompactorTest.java
@@ -40,8 +40,10 @@
 import org.apache.jackrabbit.oak.commons.Buffer;
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
@@ -80,7 +82,7 @@
         addTestContent(nodeStore);
 
         SegmentNodeState uncompacted = (SegmentNodeState) nodeStore.getRoot();
-        SegmentNodeState compacted = compactor.compact(uncompacted, Canceller.newCanceller());
+        SegmentNodeState compacted = compactor.compactUp(uncompacted, Canceller.newCanceller());
         assertNotNull(compacted);
         assertFalse(uncompacted == compacted);
         assertEquals(uncompacted, compacted);
@@ -101,7 +103,7 @@
         addNodes(nodeStore, ClassicCompactor.UPDATE_LIMIT * 2 + 1);
 
         SegmentNodeState uncompacted = (SegmentNodeState) nodeStore.getRoot();
-        SegmentNodeState compacted = compactor.compact(uncompacted, Canceller.newCanceller());
+        SegmentNodeState compacted = compactor.compactUp(uncompacted, Canceller.newCanceller());
         assertNotNull(compacted);
         assertFalse(uncompacted == compacted);
         assertEquals(uncompacted, compacted);
@@ -116,25 +118,27 @@
         builder.setChildNode("cancel").setProperty("cancel", "cancel");
         nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
 
-        assertNull(compactor.compact(nodeStore.getRoot(), Canceller.newCanceller().withCondition("reason", () -> true)));
+        assertNull(compactor.compactUp(nodeStore.getRoot(), Canceller.newCanceller().withCondition("reason", () -> true)));
     }
 
     @Test(expected = IOException.class)
     public void testIOException() throws IOException, CommitFailedException {
         ClassicCompactor compactor = createCompactor(fileStore, "IOException");
         addTestContent(nodeStore);
-        compactor.compact(nodeStore.getRoot(), Canceller.newCanceller());
+        compactor.compactUp(nodeStore.getRoot(), Canceller.newCanceller());
     }
 
     @NotNull
     private static ClassicCompactor createCompactor(FileStore fileStore, String failOnName) {
+        GCGeneration generation = newGCGeneration(1, 1, true);
         SegmentWriter writer = defaultSegmentWriterBuilder("c")
-                .withGeneration(newGCGeneration(1, 1, true))
+                .withGeneration(generation)
                 .build(fileStore);
         if (failOnName != null) {
             writer = new FailingSegmentWriter(writer, failOnName);
         }
-        return new ClassicCompactor(fileStore.getReader(), writer, fileStore.getBlobStore(), GCNodeWriteMonitor.EMPTY);
+        CompactionWriter compactionWriter = new CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), generation, writer);
+        return new ClassicCompactor(compactionWriter, GCNodeWriteMonitor.EMPTY);
     }
 
     private static void addNodes(SegmentNodeStore nodeStore, int count)
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
index 70d0613..89aa792 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/CompactorTestUtils.java
@@ -20,18 +20,16 @@
 
 import static org.apache.jackrabbit.guava.common.collect.Lists.newArrayList;
 import static org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState.binaryPropertyFromBlob;
-import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.api.CommitFailedException;
-import org.apache.jackrabbit.oak.segment.file.FileStore;
-import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactedNodeState;
+import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
-import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
@@ -47,6 +45,14 @@
 
     private CompactorTestUtils() {}
 
+    public interface SimpleCompactor {
+        CompactedNodeState compact(NodeState nodeState, Canceller canceller) throws IOException;
+    }
+
+    public interface SimpleCompactorFactory {
+        SimpleCompactor newSimpleCompactor(Compactor compactor);
+    }
+
     public static void checkGeneration(NodeState node, GCGeneration gcGeneration) {
         assertTrue(node instanceof SegmentNodeState);
         assertEquals(gcGeneration, ((SegmentNodeState) node).getRecordId().getSegmentId().getGcGeneration());
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
index ae8b817..210dd60 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorExternalBlobTest.java
@@ -20,16 +20,18 @@
 
 import org.apache.jackrabbit.oak.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.List;
 
 import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 
 @RunWith(Parameterized.class)
 public class ParallelCompactorExternalBlobTest extends AbstractCompactorExternalBlobTest {
@@ -37,11 +39,20 @@
     private final int concurrency;
 
     @Parameterized.Parameters
-    public static List<Integer> concurrencyLevels() {
-        return Arrays.asList(1, 2, 4, 8, 16);
+    public static List<Object[]> parameters() {
+        Integer[] concurrencyLevels = {1, 2, 4, 8, 16};
+
+        List<Object[]> parameters = new ArrayList<>();
+        for (SimpleCompactorFactory factory : AbstractCompactorExternalBlobTest.compactorFactories()) {
+            for (int concurrency : concurrencyLevels) {
+                parameters.add(new Object[]{factory, concurrency});
+            }
+        }
+        return parameters;
     }
 
-    public ParallelCompactorExternalBlobTest(int concurrency) {
+    public ParallelCompactorExternalBlobTest(@NotNull SimpleCompactorFactory compactorFactory, int concurrency) {
+        super(compactorFactory);
         this.concurrency = concurrency;
     }
 
@@ -51,13 +62,7 @@
                 .withGeneration(generation)
                 .withWriterPool(SegmentBufferWriterPool.PoolType.THREAD_SPECIFIC)
                 .build(fileStore);
-
-        return new ParallelCompactor(
-                GCMonitor.EMPTY,
-                fileStore.getReader(),
-                writer,
-                fileStore.getBlobStore(),
-                GCNodeWriteMonitor.EMPTY,
-                concurrency);
+        CompactionWriter compactionWriter = new CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), generation, writer);
+        return new ParallelCompactor(GCMonitor.EMPTY, compactionWriter, GCNodeWriteMonitor.EMPTY, concurrency);
     }
 }
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
index 27bebb1..2855516 100644
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ParallelCompactorTest.java
@@ -19,17 +19,19 @@
 package org.apache.jackrabbit.oak.segment;
 
 import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.GCIncrement;
 import org.apache.jackrabbit.oak.segment.file.GCNodeWriteMonitor;
-import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.segment.file.CompactionWriter;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.jetbrains.annotations.NotNull;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.List;
 
 import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.SimpleCompactorFactory;
 
 @RunWith(Parameterized.class)
 public class ParallelCompactorTest extends AbstractCompactorTest {
@@ -37,27 +39,34 @@
     private final int concurrency;
 
     @Parameterized.Parameters
-    public static List<Integer> concurrencyLevels() {
-        return Arrays.asList(1, 2, 4, 8, 16);
+    public static List<Object[]> parameters() {
+        Integer[] concurrencyLevels = {1, 2, 4, 8, 16};
+
+        List<Object[]> parameters = new ArrayList<>();
+        for (SimpleCompactorFactory factory : AbstractCompactorExternalBlobTest.compactorFactories()) {
+            for (int concurrency : concurrencyLevels) {
+                parameters.add(new Object[]{factory, concurrency});
+            }
+        }
+        return parameters;
     }
 
-    public ParallelCompactorTest(int concurrency) {
+    public ParallelCompactorTest(@NotNull SimpleCompactorFactory compactorFactory, int concurrency) {
+        super(compactorFactory);
         this.concurrency = concurrency;
     }
 
     @Override
-    protected ParallelCompactor createCompactor(@NotNull FileStore fileStore, @NotNull GCGeneration generation) {
-        SegmentWriter writer = defaultSegmentWriterBuilder("c")
+    protected ParallelCompactor createCompactor(
+            @NotNull FileStore fileStore,
+            @NotNull GCIncrement increment,
+            @NotNull GCNodeWriteMonitor compactionMonitor
+    ) {
+        SegmentWriterFactory writerFactory = generation -> defaultSegmentWriterBuilder("c")
                 .withGeneration(generation)
                 .withWriterPool(SegmentBufferWriterPool.PoolType.THREAD_SPECIFIC)
                 .build(fileStore);
-
-        return new ParallelCompactor(
-                GCMonitor.EMPTY,
-                fileStore.getReader(),
-                writer,
-                fileStore.getBlobStore(),
-                GCNodeWriteMonitor.EMPTY,
-                concurrency);
+        CompactionWriter compactionWriter = new CompactionWriter(fileStore.getReader(), fileStore.getBlobStore(), increment, writerFactory);
+        return new ParallelCompactor(GCMonitor.EMPTY, compactionWriter, compactionMonitor, concurrency);
     }
 }
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategyTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategyTest.java
deleted file mode 100644
index c7f67e6..0000000
--- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/CompactionStrategyTest.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.jackrabbit.oak.segment.file;
-
-import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.when;
-
-public class CompactionStrategyTest {
-
-    private static final Throwable MARKER_THROWABLE =
-            new RuntimeException("We pretend that something went horribly wrong.");
-
-    @Test
-    public void compactionIsAbortedOnAnyThrowable() throws IOException {
-        MemoryStore store = new MemoryStore();
-        CompactionStrategy.Context throwingContext = Mockito.mock(CompactionStrategy.Context.class);
-        when(throwingContext.getGCListener()).thenReturn(Mockito.mock(GCListener.class));
-        when(throwingContext.getRevisions()).thenReturn(store.getRevisions());
-        when(throwingContext.getGCOptions()).thenThrow(MARKER_THROWABLE);
-
-        try {
-            final CompactionResult compactionResult = new FullCompactionStrategy().compact(throwingContext);
-            assertThat("Compaction should be properly aborted.", compactionResult.isSuccess(), is(false));
-        } catch (Throwable e) {
-            if (e == MARKER_THROWABLE) {
-                fail("The marker throwable was not caught by the CompactionStrategy and therefore not properly aborted.");
-            } else {
-                throw new IllegalStateException("The test likely needs to be adjusted.", e);
-            }
-        }
-    }
-}
\ No newline at end of file
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/DefaultGarbageCollectionStrategyTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/DefaultGarbageCollectionStrategyTest.java
new file mode 100644
index 0000000..953271c
--- /dev/null
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/DefaultGarbageCollectionStrategyTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.SegmentCache;
+import org.apache.jackrabbit.oak.segment.SegmentId;
+import org.apache.jackrabbit.oak.segment.SegmentTracker;
+import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
+import org.apache.jackrabbit.oak.segment.file.tar.CleanupContext;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
+import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.verification.VerificationMode;
+
+import java.io.IOException;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class DefaultGarbageCollectionStrategyTest {
+    private final GCJournal journal;
+
+    public DefaultGarbageCollectionStrategyTest() {
+        journal = Mockito.mock(GCJournal.class);
+        when(journal.read()).thenReturn(Mockito.mock(GCJournal.GCJournalEntry.class));
+    }
+
+    private GarbageCollectionStrategy.Context getMockedGCContext(MemoryStore store) throws IOException {
+        GarbageCollectionStrategy.Context mockedContext = Mockito.mock(GarbageCollectionStrategy.Context.class);
+
+        when(mockedContext.getGCListener()).thenReturn(Mockito.mock(GCListener.class));
+        when(mockedContext.getTarFiles()).thenReturn(Mockito.mock(TarFiles.class));
+        when(mockedContext.getSegmentCache()).thenReturn(Mockito.mock(SegmentCache.class));
+        when(mockedContext.getFileStoreStats()).thenReturn(Mockito.mock(FileStoreStats.class));
+
+        SegmentTracker tracker = new SegmentTracker((msb, lsb) -> new SegmentId(store, msb, lsb));
+        when(mockedContext.getSegmentTracker()).thenReturn(tracker);
+        when(mockedContext.getCompactionMonitor()).thenReturn(GCNodeWriteMonitor.EMPTY);
+        when(mockedContext.getRevisions()).thenReturn(store.getRevisions());
+        when(mockedContext.getGCJournal()).thenReturn(journal);
+
+        TarFiles mockedTarFiles = Mockito.mock(TarFiles.class);
+        when(mockedContext.getTarFiles()).thenReturn(mockedTarFiles);
+        when(mockedTarFiles.cleanup(any(CleanupContext.class)))
+                .thenReturn(Mockito.mock(TarFiles.CleanupResult.class));
+
+        return mockedContext;
+    }
+
+    private void runCleanup(CompactionResult result) throws IOException {
+        MemoryStore store = new MemoryStore();
+        DefaultGarbageCollectionStrategy strategy = new DefaultGarbageCollectionStrategy();
+        strategy.cleanup(getMockedGCContext(store), result);
+    }
+
+    private void verifyGCJournalPersistence(VerificationMode mode) {
+        verify(journal, mode).persist(
+                anyLong(),
+                anyLong(),
+                any(GCGeneration.class),
+                anyLong(),
+                anyString());
+    }
+
+    @Test
+    public void successfulCompactionPersistsToJournal() throws Exception {
+        CompactionResult result = CompactionResult.succeeded(
+                SegmentGCOptions.GCType.FULL,
+                GCGeneration.NULL,
+                SegmentGCOptions.defaultGCOptions(),
+                RecordId.NULL,
+                0);
+        runCleanup(result);
+        verifyGCJournalPersistence(times(1));
+    }
+
+    @Test
+    public void partialCompactionDoesNotPersistToJournal() throws Exception {
+        CompactionResult result = CompactionResult.partiallySucceeded(GCGeneration.NULL, RecordId.NULL, 0);
+        runCleanup(result);
+        verifyGCJournalPersistence(never());
+    }
+
+    @Test
+    public void skippedCompactionDoesNotPersistToJournal() throws Exception {
+        CompactionResult result = CompactionResult.skipped(
+                SegmentGCOptions.GCType.FULL,
+                GCGeneration.NULL,
+                SegmentGCOptions.defaultGCOptions(),
+                RecordId.NULL,
+                0);
+        runCleanup(result);
+        verifyGCJournalPersistence(never());
+    }
+
+    @Test
+    public void nonApplicableCompactionDoesNotPersistToJournal() throws Exception {
+        runCleanup(CompactionResult.notApplicable(0));
+        verifyGCJournalPersistence(never());
+    }
+
+    @Test
+    public void abortedCompactionDoesNotPersistToJournal() throws Exception {
+        runCleanup(CompactionResult.aborted(GCGeneration.NULL, 0));
+        verifyGCJournalPersistence(never());
+    }
+}
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategyTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategyTest.java
new file mode 100644
index 0000000..44995f6
--- /dev/null
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/FullCompactionStrategyTest.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.CompactorTestUtils;
+import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.SegmentBufferWriterPool;
+import org.apache.jackrabbit.oak.segment.SegmentNodeState;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.SegmentWriterFactory;
+import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
+import org.apache.jackrabbit.oak.segment.file.cancel.Canceller;
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.apache.jackrabbit.oak.segment.file.tar.TarFiles;
+import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
+import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+
+import static org.apache.jackrabbit.oak.segment.CompactorTestUtils.addTestContent;
+import static org.apache.jackrabbit.oak.segment.DefaultSegmentWriterBuilder.defaultSegmentWriterBuilder;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.when;
+
+public class FullCompactionStrategyTest {
+
+    private static final Throwable MARKER_THROWABLE =
+            new RuntimeException("We pretend that something went horribly wrong.");
+
+    @Test
+    public void compactionIsAbortedOnAnyThrowable() throws IOException {
+        MemoryStore store = new MemoryStore();
+        CompactionStrategy.Context throwingContext = Mockito.mock(CompactionStrategy.Context.class);
+        when(throwingContext.getGCListener()).thenReturn(Mockito.mock(GCListener.class));
+        when(throwingContext.getRevisions()).thenReturn(store.getRevisions());
+        when(throwingContext.getGCOptions()).thenThrow(MARKER_THROWABLE);
+
+        try {
+            final CompactionResult compactionResult = new FullCompactionStrategy().compact(throwingContext);
+            assertThat("Compaction should be properly aborted.", compactionResult.isSuccess(), is(false));
+        } catch (Throwable e) {
+            if (e == MARKER_THROWABLE) {
+                fail("The marker throwable was not caught by the CompactionStrategy and therefore not properly aborted.");
+            } else {
+                throw new IllegalStateException("The test likely needs to be adjusted.", e);
+            }
+        }
+    }
+
+    private CompactionStrategy.Context getMockedCompactionContext(MemoryStore store) {
+        CompactionStrategy.Context mockedContext = Mockito.mock(CompactionStrategy.Context.class);
+        when(mockedContext.getGCListener()).thenReturn(Mockito.mock(GCListener.class));
+        when(mockedContext.getTarFiles()).thenReturn(Mockito.mock(TarFiles.class));
+        when(mockedContext.getSuccessfulCompactionListener()).thenReturn(Mockito.mock(SuccessfulCompactionListener.class));
+        when(mockedContext.getGCOptions()).thenReturn(SegmentGCOptions.defaultGCOptions());
+        when(mockedContext.getFlusher()).thenReturn(() -> {});
+
+        GCJournal mockedJournal = Mockito.mock(GCJournal.class);
+        when(mockedContext.getGCJournal()).thenReturn(mockedJournal);
+        when(mockedJournal.read()).thenReturn(Mockito.mock(GCJournal.GCJournalEntry.class));
+
+        when(mockedContext.getRevisions()).thenReturn(store.getRevisions());
+        when(mockedContext.getSegmentReader()).thenReturn(store.getReader());
+
+        SegmentWriterFactory writerFactory = generation -> defaultSegmentWriterBuilder("c")
+                .withGeneration(generation)
+                .withWriterPool(SegmentBufferWriterPool.PoolType.THREAD_SPECIFIC)
+                .build(store);
+        when(mockedContext.getSegmentWriterFactory()).thenReturn(writerFactory);
+
+        return mockedContext;
+    }
+
+    @Test
+    public void testIntermediateStateSave() throws Exception {
+        MemoryStore store = new MemoryStore();
+        NodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
+
+        for (int i = 1; i < 100; i++) {
+            addTestContent("node" + i, nodeStore, 42);
+        }
+
+        CompactionStrategy.Context mockedContext = getMockedCompactionContext(store);
+        GCNodeWriteMonitor gcMonitor = new GCNodeWriteMonitor(-1, GCMonitor.EMPTY);
+        when(mockedContext.getCompactionMonitor()).thenReturn(gcMonitor);
+
+        when(mockedContext.getStateSaveTriggerSupplier()).thenReturn(
+                () -> {
+                    long compactedNodes = gcMonitor.getCompactedNodes();
+                    return Canceller.newCanceller().withCondition("10 more nodes compacted",
+                            () -> gcMonitor.getCompactedNodes() >= compactedNodes + 10);
+                }
+        );
+        when(mockedContext.getHardCanceller()).thenReturn(
+                Canceller.newCanceller().withCondition("50 total nodes compacted",
+                        () -> gcMonitor.getCompactedNodes() >= 50)
+        );
+        when(mockedContext.getSoftCanceller()).thenReturn(Canceller.newCanceller());
+
+        RecordId initialHead = store.getRevisions().getHead();
+
+        FullCompactionStrategy strategy = new FullCompactionStrategy();
+        CompactionResult result = strategy.compact(mockedContext);
+
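+        // the hard canceller aborts the run after 50 compacted nodes, so compaction
+        // fails, but the intermediate state saves (triggered every 10 nodes) have
+        // already advanced the head beyond its initial record id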
+        assertFalse(result.isSuccess());
+        assertNotEquals(initialHead, store.getRevisions().getHead());
+    }
+
+    @Test
+    public void testIncrementalCompaction() throws Exception {
+        MemoryStore store = new MemoryStore();
+        NodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
+
+        for (int i = 1; i < 100; i++) {
+            addTestContent("node" + i, nodeStore, 42);
+        }
+
+        CompactionStrategy.Context mockedContext = getMockedCompactionContext(store);
+        GCNodeWriteMonitor gcMonitor = new GCNodeWriteMonitor(-1, GCMonitor.EMPTY);
+        when(mockedContext.getCompactionMonitor()).thenReturn(gcMonitor);
+
+        when(mockedContext.getStateSaveTriggerSupplier()).thenReturn(
+                () -> {
+                    long compactedNodes = gcMonitor.getCompactedNodes();
+                    return Canceller.newCanceller().withCondition("10 more nodes compacted",
+                            () -> gcMonitor.getCompactedNodes() >= compactedNodes + 10);
+                }
+        );
+        when(mockedContext.getHardCanceller()).thenReturn(Canceller.newCanceller());
+        when(mockedContext.getSoftCanceller()).thenReturn(Canceller.newCanceller());
+
+        SegmentNodeState initialHeadState = store.getReader().readNode(store.getRevisions().getHead());
+        GCGeneration baseGeneration = initialHeadState.getGcGeneration();
+        CompactorTestUtils.checkGeneration(initialHeadState, baseGeneration);
+
+        FullCompactionStrategy strategy = new FullCompactionStrategy();
+        CompactionResult result = strategy.compact(mockedContext);
+        assertTrue(result.isSuccess());
+
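+        // a successful full compaction is expected to move the head to the next full GC generation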
+        SegmentNodeState headState = store.getReader().readNode(store.getRevisions().getHead());
+        CompactorTestUtils.checkGeneration(headState, baseGeneration.nextFull());
+    }
+}
\ No newline at end of file
diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/GCIncrementTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/GCIncrementTest.java
new file mode 100644
index 0000000..469f7a7
--- /dev/null
+++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/file/GCIncrementTest.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment.file;
+
+import org.apache.jackrabbit.oak.segment.file.tar.GCGeneration;
+import org.junit.Test;
+
+import static org.apache.jackrabbit.oak.segment.file.tar.GCGeneration.newGCGeneration;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
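+/**
+ * Tests for GCIncrement#isFullyCompacted as exercised here: only the target
+ * generation of an increment is reported as fully compacted; the base generation,
+ * an intermediate partial generation or an unrelated generation never are.
+ */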
+public class GCIncrementTest {
+
+    @Test
+    public void testIsCompactedSameGeneration() {
+        GCGeneration base = GCGeneration.NULL;
+        GCIncrement increment = new GCIncrement(base, base, base);
+
+        assertFalse(increment.isFullyCompacted(base));
+        assertFalse(increment.isFullyCompacted(newGCGeneration(1, 1, true)));
+    }
+
+    @Test
+    public void testIsCompactedTail() {
+        GCGeneration base = GCGeneration.NULL;
+        GCIncrement increment = new GCIncrement(base, base.nextPartial(), base.nextTail());
+
+        assertFalse(increment.isFullyCompacted(newGCGeneration(1, 0, false)));
+        assertFalse(increment.isFullyCompacted(base));
+        assertFalse(increment.isFullyCompacted(base.nextPartial()));
+        assertTrue(increment.isFullyCompacted(base.nextTail()));
+    }
+
+    @Test
+    public void testIsCompactedFull() {
+        GCGeneration base = GCGeneration.NULL;
+        GCIncrement increment = new GCIncrement(base, base.nextPartial(), base.nextFull());
+
+        assertFalse(increment.isFullyCompacted(base));
+        assertFalse(increment.isFullyCompacted(newGCGeneration(1, 1, false)));
+        assertFalse(increment.isFullyCompacted(base.nextPartial()));
+        assertFalse(increment.isFullyCompacted(base.nextTail()));
+        assertTrue(increment.isFullyCompacted(base.nextFull()));
+    }
+
+}
diff --git a/oak-solr-osgi/pom.xml b/oak-solr-osgi/pom.xml
index 1ee81c0..ccf4e15 100644
--- a/oak-solr-osgi/pom.xml
+++ b/oak-solr-osgi/pom.xml
@@ -40,36 +40,20 @@
                 <configuration>
                     <instructions>
                         <Import-Package>
-                            org.apache.lucene.*;resolution:=optional,
-                            com.googlecode.*;resolution:=optional,
-                            com.vividsolutions.jts.*;resolution:=optional,
                             com.sun.*;resolution:=optional,
-                            jline;resolution:=optional,
-                            org.apache.hadoop.*;resolution:=optional,
-                            org.apache.regexp.*;resolution:=optional,
+                            io.netty.*;resolution:=optional,
+                            jline.*;resolution:=optional,
+                            javax.annotation;resolution:=optional,
+                            org.apache.jute.*;resolution:=optional,
                             org.apache.log4j.*;resolution:=optional,
                             org.apache.yetus.audience.*;resolution:=optional,
-                            org.jboss.netty.*;resolution:=optional,
-                            org.restlet.*;resolution:=optional,
-                            org.joda.time.*;resolution:=optional,
+                            org.apache.zookeeper.data.*;resolution:=optional,
+                            org.apache.zookeeper.proto.*;resolution:=optional,
+                            org.apache.zookeeper.txn.*;resolution:=optional,
                             org.eclipse.*;resolution:=optional,
-                            javax.servlet.*;resolution:=optional,
-                            com.tdunning.math.*;resolution:=optional,
-                            com.codahale.metrics.*;resolution:=optional,
-                            info.ganglia.gmetric4j.*;resolution:=optional,
-                            org.apache.calcite.adapter.*;resolution:=optional,
-                            org.apache.calcite.ling4j.*;resolution:=optional,
-                            org.apache.calcite.rel.*;resolution:=optional,
-                            org.apache.calcite.schema.*;resolution:=optional,
-                            org.apache.calcite.sql.*;resolution:=optional,
-                            org.apache.calcite.*;resolution:=optional,
-                            org.apache.curator.framework.*;resolution:=optional,
-                            org.apache.curator.*;resolution:=optional,
-                            com.github.benmanes.caffeine.*;resolution:=optional,
-                            org.apache.solr.handler.extraction.*;resolution:=optional,
-                            com.ibm.security.krb5.internal.*;resolution:=optional,
-                            sun.misc.*;resolution:=optional,
-                            sun.security.krb5.*;resolution:=optional,
+                            org.xerial.snappy.*;resolution:=optional,
+                            com.fasterxml.jackson.annotation.*;resolution:=optional,
+                            com.fasterxml.jackson.databind.*;resolution:=optional,
                             *
                         </Import-Package>
                         <Embed-Dependency>*;scope=runtime;inline=true</Embed-Dependency>
@@ -153,25 +137,7 @@
         <dependency>
             <groupId>org.apache.zookeeper</groupId>
             <artifactId>zookeeper</artifactId>
-            <version>3.4.14</version>
-            <scope>runtime</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version><!-- see OAK-8829-->2.9.10</version>
-            <scope>runtime</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-smile</artifactId>
-            <version><!-- see OAK-8829-->2.9.10</version>
-            <scope>runtime</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-exec</artifactId>
-            <version>1.3</version>
+            <version><!-- see OAK-10548-->3.9.1</version>
             <scope>runtime</scope>
         </dependency>
         <dependency>
diff --git a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index c79c6ad..6e7afea 100644
--- a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -971,20 +971,7 @@
                     readRevision, validRevisions, lastRevs);
 
             // check if there may be more recent values in a previous document
-            if (value != null
-                    && !getPreviousRanges().isEmpty()
-                    && !isMostRecentCommitted(local, value.revision, nodeStore)) {
-                // not reading the most recent value, we may need to
-                // consider previous documents as well
-                for (Revision prev : getPreviousRanges().keySet()) {
-                    if (prev.compareRevisionTimeThenClusterId(value.revision) > 0) {
-                        // a previous document has more recent changes
-                        // than value.revision
-                        value = null;
-                        break;
-                    }
-                }
-            }
+            value = requiresCompleteMapCheck(value, local, nodeStore) ? null : value;
 
             if (value == null && !getPreviousRanges().isEmpty()) {
                 // check revision history
@@ -1067,8 +1054,11 @@
                                     RevisionVector readRevision,
                                     Map<Revision, String> validRevisions,
                                     LastRevs lastRevs) {
+        final SortedMap<Revision, String> local = getLocalDeleted();
         // check local deleted map first
-        Value value = getLatestValue(context, getLocalDeleted().entrySet(), readRevision, validRevisions, lastRevs);
+        Value value = getLatestValue(context, local.entrySet(), readRevision, validRevisions, lastRevs);
+        // check if there may be more recent values in a previous document
+        value = requiresCompleteMapCheck(value, local, context) ? null : value;
         if (value == null && !getPreviousRanges().isEmpty()) {
             // need to check complete map
             value = getLatestValue(context, getDeleted().entrySet(), readRevision, validRevisions, lastRevs);
@@ -2182,6 +2172,34 @@
     }
 
     /**
+     * Check whether a previous document may contain more recent values than the
+     * one resolved from the local value map, in which case the complete map needs
+     * to be checked.
+     *
+     * @param localValue the value as resolved from the local value map
+     * @param local      the local value map
+     * @param context    the revision context
+     * @return {@code true} if a previous document may contain a more recent value
+     *         and the complete map must be checked, {@code false} otherwise
+     */
+    private boolean requiresCompleteMapCheck(@Nullable Value localValue,
+            @NotNull SortedMap<Revision, String> local,
+            @NotNull RevisionContext context) {
+        if (localValue != null
+                && !getPreviousRanges().isEmpty()
+                && !isMostRecentCommitted(local, localValue.revision, context)) {
+            // not reading the most recent value, we may need to
+            // consider previous documents as well
+            for (Revision prev : getPreviousRanges().keySet()) {
+                if (prev.compareRevisionTimeThenClusterId(localValue.revision) > 0) {
+                    // a previous document has more recent changes
+                    // than localValue.revision
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    /**
      * Get the latest property value smaller or equal the readRevision revision.
      *
      * @param valueMap the sorted revision-value map
diff --git a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
index 5e0b8f6..2644ee7 100644
--- a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
+++ b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
@@ -324,7 +324,13 @@
                 for (Range r : entry.getValue()) {
                     setPrevious(intermediate, r);
                 }
-                setIntermediateDocProps(intermediate, h);
+                // OAK-10526 : setting 'maxRev=now()' here guarantees that the earliest GC
+                // of this split doc happens 'maxAgeMillis' (24h) from now (hence covering
+                // all open JCR sessions), or later if any checkpoint created before
+                // 'now()' is still held. While this keeps the garbage split doc around
+                // slightly longer than absolutely necessary, it is a rather simple and
+                // robust mechanism.
+                setIntermediateDocProps(intermediate,
+                        Revision.newRevision(context.getClusterId()));
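+                // (Revision.newRevision(clusterId) is stamped with the current time,
+                // which is what makes maxRev equal to 'now()' as described above)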
                 splitOps.add(intermediate);
             }
         }
@@ -380,7 +386,13 @@
             // check size of old document
             NodeDocument oldDoc = new NodeDocument(STORE);
             UpdateUtils.applyChanges(oldDoc, old);
-            setSplitDocProps(doc, oldDoc, old, high);
+            // OAK-10526 : setting 'maxRev=now()' here guarantees that the earliest GC
+            // of this split doc happens 'maxAgeMillis' (24h) from now (hence covering
+            // all open JCR sessions), or later if any checkpoint created before
+            // 'now()' is still held. While this keeps the garbage split doc around
+            // slightly longer than absolutely necessary, it is a rather simple and
+            // robust mechanism.
+            setSplitDocProps(doc, oldDoc, old,
+                    Revision.newRevision(context.getClusterId()));
             splitOps.add(old);
 
             if (numValues < numRevsThreshold) {
diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCSplitDocTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCSplitDocTest.java
index dae5394..c585f00 100644
--- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCSplitDocTest.java
+++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGCSplitDocTest.java
@@ -259,8 +259,21 @@
 
         ns.runBackgroundOperations();
 
-        // wait one hour
-        clock.waitUntil(clock.getTime() + HOURS.toMillis(1));
+        // with OAK-10526 the split doc maxRev is now set to now().
+        // the split doc type 70 GC on Mongo uses the sweepRev,
+        // so to get type 70 docs GCed we need to advance the sweepRev.
+        // hence, instead of a single 1 hour wait, we now do:
+        // wait 1 min
+        clock.waitUntil(clock.getTime() + MINUTES.toMillis(1));
+
+        // to advance sweepRev : unrelated change + sweep
+        builder = ns.getRoot().builder();
+        builder.child("unrelated");
+        ns.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        ns.runBackgroundOperations();
+
+        // wait 59 min
+        clock.waitUntil(clock.getTime() + MINUTES.toMillis(59));
 
         int nodesBeforeGc = countNodeDocuments();
         assertEquals(0, countStalePrev());
diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorIT.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorIT.java
index 1ecbd73..f46d4cd 100644
--- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorIT.java
+++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollectorIT.java
@@ -82,7 +82,6 @@
 import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -290,12 +289,181 @@
     }
 
     /**
+     * OAK-10542 with OAK-10526 : This reproduces a case where a _deleted revision
+     * that is still used by a checkpoint is split away and then GCed. This variant
+     * tests a checkpoint when /t/target is deleted.
+     */
+    @Test
+    public void gcSplitDocWithReferencedDeleted_combined() throws Exception {
+        // step 1 : create a _deleted entry with clusterId 2, plus do a GC
+        final DocumentNodeStore store2 = createSecondary();
+        createLeaf(store2, "t", "target");
+        store2.runBackgroundOperations();
+        assertEquals(0, store2.getVersionGarbageCollector().gc(24, HOURS).splitDocGCCount);
+
+        // step 2 : nearly cause a split of the target doc - via clusterId 1
+        store.runBackgroundOperations();
+        for (int i = 0; i < (NUM_REVS_THRESHOLD / 2) - 1; i++) {
+            deleteLeaf(store, "t", "target");
+            createLeaf(store, "t", "target");
+        }
+        // the last change should be a delete (that's what this test case is for)
+        deleteLeaf(store, "t", "target");
+        store.runBackgroundOperations();
+
+        // step 3 : do a minimal sleep + bcOps between last change and the checkpoint to
+        // ensure maxRev and checkpoint are more than precisionMs apart
+        clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(61));
+        store.runBackgroundOperations();
+
+        // step 4 : then take a checkpoint referring to the last rev in the split doc
+        // (which is 'deleted')
+        final String checkpoint = store.checkpoint(TimeUnit.DAYS.toMillis(42));
+
+        // step 5 : ensure another precisionMs gap between the checkpoint and the
+        // split-triggering change
+        clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(61));
+
+        // step 6 : trigger the split - main doc will contain "_deleted"="false"
+        createLeaf(store, "t", "target");
+        store.runBackgroundOperations();
+
+        // step 7 : wait for 25h - to also be more than 24h away from maxRev
+        clock.waitUntil(clock.getTime() + TimeUnit.HOURS.toMillis(25));
+
+        // step 8 : do the gc
+        // expect a split doc at depth 4 for /t/target to exist
+        assertEquals(1, store.getDocumentStore()
+                .query(NODES, "4:p/t/target/", "4:p/t/target/z", 5).size());
+        gc.gc(24, HOURS);
+        // before the fix, the split doc was GCed (but we can't make that an assert)
+        //assertEquals(0, store.getDocumentStore()
+        //        .query(NODES, "4:p/t/target/", "4:p/t/target/z", 5).size());
+
+        // step 9 : make assertions about /t/target at root and checkpoint
+        // invalidate node cache to ensure readNode/getNodeAtRevision is called below
+        store.getNodeCache().invalidateAll();
+        assertTrue(store.getRoot().getChildNode("t").getChildNode("target").exists());
+        // invalidate node cache to ensure readNode/getNodeAtRevision is called below
+        store.getNodeCache().invalidateAll();
+        assertEquals(false, store.retrieve(checkpoint).getChildNode("t")
+                .getChildNode("target").exists());
+    }
+
+    /**
+     * OAK-10542 : This reproduces a case where a split doc is created that contains
+     * a revision of _deleted that is still referred to by a checkpoint. The fact that
+     * _deleted is split "in the middle" used to confuse the getLiveRevision lookup,
+     * as it was not considering split documents for the _deleted property as long as
+     * it found a valid revision in the main document. This variant tests a
+     * checkpoint when /t/target exists.
+     */
+    @Test
+    public void gcSplitDocWithReferencedDeleted_true() throws Exception {
+        // step 1 : create some _deleted entries with clusterId 2
+        final DocumentNodeStore store2 = createSecondary();
+        createLeaf(store2, "t", "target");
+        deleteLeaf(store2, "t", "target");
+        store2.runBackgroundOperations();
+
+        // step 2 : create a _deleted=false entry with clusterId 1
+        store.runBackgroundOperations();
+        createLeaf(store, "t", "target");
+        // create a checkpoint where /t/target should exist
+        final String checkpoint = store.checkpoint(TimeUnit.DAYS.toMillis(42));
+
+        // step 3 : cause a split doc with _deleted with clusterId 1
+        for (int i = 0; i < NUM_REVS_THRESHOLD; i++) {
+            createLeaf(store, "t", "target");
+            deleteLeaf(store, "t", "target");
+        }
+        store.runBackgroundOperations();
+
+        // step 4 : make assertions about /t/target at root and checkpoint
+        // invalidate node cache to ensure readNode is called below
+        store.getNodeCache().invalidateAll();
+        assertFalse(store.getRoot().getChildNode("t").getChildNode("target").exists());
+        // invalidate node cache to ensure readNode is called below
+        store.getNodeCache().invalidateAll();
+        assertEquals(true, store.retrieve(checkpoint).getChildNode("t")
+                .getChildNode("target").exists());
+
+    }
+
+    /**
+     * OAK-10542 : This reproduces a case where a split doc is created that contains
+     * a revision of _deleted that is still referred to by a checkpoint. The fact that
+     * _deleted is split "in the middle" used to confuse the getLiveRevision lookup,
+     * as it was not considering split documents for the _deleted property as long as
+     * it found a valid revision in the main document. This variant tests a
+     * checkpoint when /t/target is deleted.
+     */
+    @Test
+    public void gcSplitDocWithReferencedDeleted_false() throws Exception {
+        // step 1 : create a _deleted entry with clusterId 2
+        final DocumentNodeStore store2 = createSecondary();
+        createLeaf(store2, "t", "target");
+        store2.runBackgroundOperations();
+
+        // step 2 : create a _deleted=true entry with clusterId 1
+        store.runBackgroundOperations();
+        deleteLeaf(store, "t", "target");
+        // create a checkpoint where /t/target should not exist
+        final String checkpoint = store.checkpoint(TimeUnit.DAYS.toMillis(42));
+
+        // step 3 : cause a split doc with _deleted with clusterId 1
+        for (int i = 0; i < NUM_REVS_THRESHOLD; i++) {
+            createLeaf(store, "t", "target");
+            deleteLeaf(store, "t", "target");
+        }
+        store.runBackgroundOperations();
+
+        // step 4 : make assertions about /t/target at root and checkpoint
+        // invalidate node cache to ensure readNode/getNodeAtRevision is called below
+        store.getNodeCache().invalidateAll();
+        assertFalse(store.getRoot().getChildNode("t").getChildNode("target").exists());
+        // invalidate node cache to ensure readNode/getNodeAtRevision is called below
+        store.getNodeCache().invalidateAll();
+        assertEquals(false, store.retrieve(checkpoint).getChildNode("t")
+                .getChildNode("target").exists());
+
+    }
+
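+    /**
+     * Creates a second DocumentNodeStore (clusterId 2) on top of the same underlying
+     * DocumentStore, with lease checks disabled and background operations run manually.
+     */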
+    private DocumentNodeStore createSecondary() {
+        return new DocumentMK.Builder().clock(clock)
+                .setLeaseCheckMode(LeaseCheckMode.DISABLED)
+                .setDocumentStore(store.getDocumentStore()).setAsyncDelay(0)
+                .setClusterId(2).getNodeStore();
+    }
+
+    private void createLeaf(DocumentNodeStore s, String... pathElems) throws Exception {
+        createOrDeleteLeaf(s, false, pathElems);
+    }
+
+    private void deleteLeaf(DocumentNodeStore s, String... pathElems) throws Exception {
+        createOrDeleteLeaf(s, true, pathElems);
+    }
+
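+    /**
+     * Creates (or, if {@code delete} is true, removes) the node at the given path below
+     * the root, advancing the virtual clock by 10 seconds first so that every change is
+     * committed at a distinct, later revision timestamp.
+     */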
+    private void createOrDeleteLeaf(DocumentNodeStore s, boolean delete,
+            String... pathElems) throws Exception {
+        clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(10));
+        final NodeBuilder rb = s.getRoot().builder();
+        NodeBuilder b = rb;
+        for (String pathElem : pathElems) {
+            b = b.child(pathElem);
+        }
+        if (delete) {
+            b.remove();
+        }
+        s.merge(rb, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+    }
+
+    /**
      * OAK-10526 : This reproduces a case where a split doc is created then GCed,
      * while there is a checkpoint that still refers to a revision contained in that
      * split doc.
      */
     @Test
-    @Ignore(value = "requires fix for OAK-10526")
     public void gcSplitDocsWithReferencedRevisions() throws Exception {
         final String exp;
 
@@ -327,7 +495,14 @@
         exp = lastValue;
         store.runBackgroundOperations();
 
-        // step 5 : create a checkpoint at t(+1w)
+        // step 4b : another change to advance lastRev for clusterId 1
+        // required to ensure the 5 sec rounding of the Mongo variant is also covered
+        clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(6));
+        b1 = store.getRoot().builder();
+        b1.child("unrelated").setProperty("unrelated", "unrelated");
+        store.merge(b1, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        // step 5 : create a checkpoint at t(+1w+6sec)
         String checkpoint = store.checkpoint(TimeUnit.DAYS.toMillis(42));
         assertEquals(exp, store.getRoot().getChildNode("t").getString("foo"));
         assertEquals(exp, store.retrieve(checkpoint).getChildNode("t").getString("foo"));
@@ -348,8 +523,8 @@
         // as we'd be in the same rounded second) -> t(+2w:30s)
         clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(30));
 
-        // step 9 : trigger another GC - this now splits away the referenced revision
-        assertEquals(1, gc.gc(24, HOURS).splitDocGCCount);
+        // step 9 : trigger another GC - this previously split away the referenced revision
+        assertEquals(0, gc.gc(24, HOURS).splitDocGCCount);
         // flush the caches as otherwise it might deliver stale data
         store.getNodeCache().invalidateAll();
         assertEquals("barZ", store.getRoot().getChildNode("t").getString("foo"));