Replacing master with contents of rel/v1.15.1
diff --git a/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy b/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy
index f8fb6e3..89ef3f6 100644
--- a/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy
+++ b/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy
@@ -41,7 +41,7 @@
     deps.put("jgroups.version", "3.6.14.Final")
     deps.put("log4j.version", "2.17.2")
     deps.put("micrometer.version", "1.9.0")
-    deps.put("shiro.version", "1.9.0")
+    deps.put("shiro.version", "1.9.1")
     deps.put("slf4j-api.version", "1.7.32")
     deps.put("jboss-modules.version", "1.11.0.Final")
     deps.put("jackson.version", "2.13.2")
@@ -61,7 +61,7 @@
 
     // The jetty version is also hard-coded in geode-assembly:test
     // at o.a.g.sessions.tests.GenericAppServerInstall.java
-    deps.put("jetty.version", "9.4.46.v20220331")
+    deps.put("jetty.version", "9.4.47.v20220610")
 
     // These versions are referenced in test.gradle, which is aggressively injected into all projects.
     deps.put("junit.version", "4.13.2")
diff --git a/ci/pipelines/shared/jinja.variables.yml b/ci/pipelines/shared/jinja.variables.yml
index bb6a516..a54f263 100644
--- a/ci/pipelines/shared/jinja.variables.yml
+++ b/ci/pipelines/shared/jinja.variables.yml
@@ -17,7 +17,7 @@
 
 benchmarks:
   baseline_branch_default: ''
-  baseline_version_default: '1.14.4'
+  baseline_version_default: '1.15.0'
   benchmark_branch: ((geode-build-branch))
   flavors:
   - title: 'base'
diff --git a/dev-tools/docker/docs/Dockerfile b/dev-tools/docker/docs/Dockerfile
index 31f841c..7833db9 100644
--- a/dev-tools/docker/docs/Dockerfile
+++ b/dev-tools/docker/docs/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM ruby:2.5
+FROM ruby:2.6.8
 
 LABEL Vendor="Apache Geode"
 LABEL version=unstable
@@ -25,16 +25,16 @@
     apt-get install -y nodejs
 RUN gem install bundler:1.17.3 \
     rake multi_json:1.13.1 \
-    elasticsearch:2.0.2 \
+    elasticsearch:7.5.0 \
     multipart-post:2.0.0 \
-    faraday:0.15.4 \
+    faraday:0.17.4 \
     libv8:3.16.14.15 \
-    mini_portile2:2.5.0 \
-    racc:1.5.2 \
-    nokogiri:1.11.2 \
-    mimemagic:0.3.9 \
-    puma:4.3.8 \
-    rack:2.1.4 \
+    mini_portile2:2.8.0 \
+    racc:1.6.0 \
+    nokogiri:1.13.3 \
+    mimemagic:0.3.10 \
+    puma:5.6.2 \
+    rack:2.2.3 \
     smtpapi:0.1.0 \
     sendgrid-ruby:1.1.6 \
     therubyracer:0.12.2
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 4e6d9fc..7022d52 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -16,23 +16,22 @@
 
 FROM bellsoft/liberica-openjdk-alpine:8
 
-RUN echo "This is a TEMPLATE, DO NOT build from this Dockerfile.  Instead checkout master or any released support/x.y branch." ; exit 1
 
 # runtime dependencies
 RUN apk add --no-cache \
 		bash \
 		ncurses
 
-ENV GEODE_GPG $$PLACEHOLDER$$
+ENV GEODE_GPG DB5476815A475574577D442B468A4800EAFB2498
 
 ENV GEODE_HOME /geode
 ENV PATH $PATH:$GEODE_HOME/bin
 
 # https://geode.apache.org/releases/
-ENV GEODE_VERSION $$PLACEHOLDER$$
+ENV GEODE_VERSION 1.15.0
 # Binaries TGZ SHA-256
 # https://dist.apache.org/repos/dist/release/geode/VERSION/apache-geode-VERSION.tgz.sha256
-ENV GEODE_SHA256 $$PLACEHOLDER$$
+ENV GEODE_SHA256 97cd96e94991cbd433d93e8474e1c2e65deb92f022d810a1931464017701701b
 
 # http://apache.org/dyn/closer.cgi/geode/1.3.0/apache-geode-1.3.0.tgz
 
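Note: the GEODE_SHA256 value set above exists so the image build can verify the downloaded release binaries against the published checksum (linked in the comment above). A minimal Java sketch of the kind of check this enables; the Dockerfile itself would typically do this in shell with sha256sum during the build, and the file name and wiring here are illustrative only:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    // Sketch only: stream the downloaded tgz through SHA-256 and compare the
    // hex digest with the GEODE_SHA256 value baked into the image.
    public class VerifyGeodeTarball {
      public static void main(String[] args) throws Exception {
        Path tgz = Path.of("apache-geode-1.15.0.tgz");
        String expected = System.getenv("GEODE_SHA256");

        MessageDigest sha256 = MessageDigest.getInstance("SHA-256");
        try (InputStream in = Files.newInputStream(tgz)) {
          byte[] buf = new byte[8192];
          for (int n; (n = in.read(buf)) != -1;) {
            sha256.update(buf, 0, n);
          }
        }
        StringBuilder actual = new StringBuilder();
        for (byte b : sha256.digest()) {
          actual.append(String.format("%02x", b));
        }
        if (!actual.toString().equalsIgnoreCase(expected)) {
          throw new IllegalStateException("SHA-256 mismatch for " + tgz);
        }
        System.out.println("checksum OK: " + actual);
      }
    }
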
diff --git a/geode-assembly/build.gradle b/geode-assembly/build.gradle
index 78f1f5e..8e62fab 100755
--- a/geode-assembly/build.gradle
+++ b/geode-assembly/build.gradle
@@ -442,14 +442,8 @@
         exclude 'wrapper'
 
         // These exclude the 'build' and 'out' artifact directories from Gradle and IntelliJ for each project
-        exclude 'buildSrc/build'
-        exclude 'buildSrc/out'
-        rootProject.allprojects.each {
-          def relPath = Paths.get(rootDir.getPath()).relativize(Paths.get(it.projectDir.getPath()))
-          def relOut = relPath.resolve("out").toString()
-          def relBuild = relPath.resolve("build").toString()
-          exclude relOut
-          exclude relBuild
+        exclude { FileTreeElement details ->
+          details.directory && (details.name == "build" || details.name == "out")
         }
       }
     }
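Note: the replacement exclude above swaps the old enumeration of each project's build/out paths for a single predicate that drops any directory named "build" or "out" at any depth of the tree, so it no longer depends on the project list. A sketch of the same predicate through Gradle's Java API; the plugin and task names here are assumptions for illustration, not part of this build:

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.file.FileTreeElement;
    import org.gradle.api.tasks.bundling.Zip;

    // Sketch: register a Zip task whose copy spec excludes every directory
    // named "build" or "out", mirroring the closure added in the diff above.
    public class ExcludeBuildDirsPlugin implements Plugin<Project> {
      @Override
      public void apply(Project project) {
        project.getTasks().register("srcArchive", Zip.class, zip ->
            zip.from(project.getRootDir(), spec -> spec.exclude(
                (FileTreeElement details) -> details.isDirectory()
                    && ("build".equals(details.getName()) || "out".equals(details.getName())))));
      }
    }

One trade-off of this form: unlike the per-project enumeration it replaces, the predicate also excludes directories that happen to be named build or out but are not Gradle or IntelliJ output directories.
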
diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/GenericAppServerInstall.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/GenericAppServerInstall.java
index 42bd6e7..88d0e5c 100644
--- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/GenericAppServerInstall.java
+++ b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/GenericAppServerInstall.java
@@ -34,7 +34,7 @@
  * specific code outside of the {@link GenericAppServerVersion}.
  */
 public class GenericAppServerInstall extends ContainerInstall {
-  private static final String JETTY_VERSION = "9.4.46.v20220331";
+  private static final String JETTY_VERSION = "9.4.47.v20220610";
 
   /**
    * Get the version number, download URL, and container name of a generic app server using
diff --git a/geode-assembly/src/integrationTest/resources/assembly_content.txt b/geode-assembly/src/integrationTest/resources/assembly_content.txt
index 2b41f9c..da0614a 100644
--- a/geode-assembly/src/integrationTest/resources/assembly_content.txt
+++ b/geode-assembly/src/integrationTest/resources/assembly_content.txt
@@ -1007,6 +1007,8 @@
 lib/jackson-annotations-2.13.2.jar
 lib/jackson-core-2.13.2.jar
 lib/jackson-databind-2.13.2.2.jar
+lib/jackson-datatype-joda-2.13.2.jar
+lib/jackson-datatype-jsr310-2.13.2.jar
 lib/javax.activation-api-1.2.0.jar
 lib/javax.mail-api-1.6.2.jar
 lib/javax.resource-api-1.7.1.jar
@@ -1014,19 +1016,20 @@
 lib/javax.transaction-api-1.3.jar
 lib/jaxb-api-2.3.1.jar
 lib/jaxb-impl-2.3.2.jar
-lib/jetty-http-9.4.46.v20220331.jar
-lib/jetty-io-9.4.46.v20220331.jar
-lib/jetty-security-9.4.46.v20220331.jar
-lib/jetty-server-9.4.46.v20220331.jar
-lib/jetty-servlet-9.4.46.v20220331.jar
-lib/jetty-util-9.4.46.v20220331.jar
-lib/jetty-util-ajax-9.4.46.v20220331.jar
-lib/jetty-webapp-9.4.46.v20220331.jar
-lib/jetty-xml-9.4.46.v20220331.jar
+lib/jetty-http-9.4.47.v20220610.jar
+lib/jetty-io-9.4.47.v20220610.jar
+lib/jetty-security-9.4.47.v20220610.jar
+lib/jetty-server-9.4.47.v20220610.jar
+lib/jetty-servlet-9.4.47.v20220610.jar
+lib/jetty-util-9.4.47.v20220610.jar
+lib/jetty-util-ajax-9.4.47.v20220610.jar
+lib/jetty-webapp-9.4.47.v20220610.jar
+lib/jetty-xml-9.4.47.v20220610.jar
 lib/jgroups-3.6.14.Final.jar
 lib/jline-2.12.jar
 lib/jna-5.11.0.jar
 lib/jna-platform-5.11.0.jar
+lib/joda-time-2.10.14.jar
 lib/jopt-simple-5.0.4.jar
 lib/log4j-api-2.17.2.jar
 lib/log4j-core-2.17.2.jar
@@ -1044,16 +1047,17 @@
 lib/mx4j-tools-3.0.1.jar
 lib/ra.jar
 lib/rmiio-2.1.2.jar
-lib/shiro-cache-1.9.0.jar
-lib/shiro-config-core-1.9.0.jar
-lib/shiro-config-ogdl-1.9.0.jar
-lib/shiro-core-1.9.0.jar
-lib/shiro-crypto-cipher-1.9.0.jar
-lib/shiro-crypto-core-1.9.0.jar
-lib/shiro-crypto-hash-1.9.0.jar
-lib/shiro-event-1.9.0.jar
-lib/shiro-lang-1.9.0.jar
+lib/shiro-cache-1.9.1.jar
+lib/shiro-config-core-1.9.1.jar
+lib/shiro-config-ogdl-1.9.1.jar
+lib/shiro-core-1.9.1.jar
+lib/shiro-crypto-cipher-1.9.1.jar
+lib/shiro-crypto-core-1.9.1.jar
+lib/shiro-crypto-hash-1.9.1.jar
+lib/shiro-event-1.9.1.jar
+lib/shiro-lang-1.9.1.jar
 lib/slf4j-api-1.7.32.jar
+lib/slf4j-api-1.7.36.jar
 lib/snappy-0.4.jar
 lib/spring-beans-5.3.20.jar
 lib/spring-context-5.3.20.jar
@@ -1070,6 +1074,3 @@
 tools/Modules/Apache_Geode_Modules-0.0.0-tcServer.zip
 tools/Modules/Apache_Geode_Modules-0.0.0-tcServer30.zip
 tools/Pulse/geode-pulse-0.0.0.war
-lib/jackson-datatype-joda-2.13.2.jar
-lib/jackson-datatype-jsr310-2.13.2.jar
-lib/joda-time-2.10.14.jar
diff --git a/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt b/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt
index 8105a12..6563825 100644
--- a/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt
+++ b/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt
@@ -21,8 +21,10 @@
 spring-web-5.3.20.jar
 commons-lang3-3.12.0.jar
 rmiio-2.1.2.jar
+jackson-datatype-joda-2.13.2.jar
 jackson-annotations-2.13.2.jar
 jackson-core-2.13.2.jar
+jackson-datatype-jsr310-2.13.2.jar
 jackson-databind-2.13.2.2.jar
 swagger-annotations-2.2.0.jar
 jopt-simple-5.0.4.jar
@@ -45,8 +47,8 @@
 istack-commons-runtime-4.0.1.jar
 jaxb-impl-2.3.2.jar
 commons-validator-1.7.jar
-shiro-core-1.9.0.jar
-shiro-config-ogdl-1.9.0.jar
+shiro-core-1.9.1.jar
+shiro-config-ogdl-1.9.1.jar
 commons-beanutils-1.9.4.jar
 commons-codec-1.15.jar
 commons-collections-3.2.2.jar
@@ -57,23 +59,24 @@
 micrometer-core-1.9.0.jar
 fastutil-8.5.8.jar
 javax.resource-api-1.7.1.jar
-jetty-webapp-9.4.46.v20220331.jar
-jetty-servlet-9.4.46.v20220331.jar
-jetty-security-9.4.46.v20220331.jar
-jetty-server-9.4.46.v20220331.jar
+jetty-webapp-9.4.47.v20220610.jar
+jetty-servlet-9.4.47.v20220610.jar
+jetty-security-9.4.47.v20220610.jar
+jetty-server-9.4.47.v20220610.jar
 javax.servlet-api-3.1.0.jar
+joda-time-2.10.14.jar
 jna-platform-5.11.0.jar
 jna-5.11.0.jar
 snappy-0.4.jar
 jgroups-3.6.14.Final.jar
-shiro-cache-1.9.0.jar
-shiro-crypto-hash-1.9.0.jar
-shiro-crypto-cipher-1.9.0.jar
-shiro-config-core-1.9.0.jar
-shiro-event-1.9.0.jar
-shiro-crypto-core-1.9.0.jar
-shiro-lang-1.9.0.jar
-slf4j-api-1.7.32.jar
+shiro-cache-1.9.1.jar
+shiro-crypto-hash-1.9.1.jar
+shiro-crypto-cipher-1.9.1.jar
+shiro-config-core-1.9.1.jar
+shiro-event-1.9.1.jar
+shiro-crypto-core-1.9.1.jar
+shiro-lang-1.9.1.jar
+slf4j-api-1.7.36.jar
 spring-beans-5.3.20.jar
 javax.activation-api-1.2.0.jar
 jline-2.12.jar
@@ -82,11 +85,8 @@
 HdrHistogram-2.1.12.jar
 LatencyUtils-2.0.3.jar
 javax.transaction-api-1.3.jar
-jetty-xml-9.4.46.v20220331.jar
-jetty-http-9.4.46.v20220331.jar
-jetty-io-9.4.46.v20220331.jar
-jetty-util-ajax-9.4.46.v20220331.jar
-jetty-util-9.4.46.v20220331.jar
-jackson-datatype-joda-2.13.2.jar
-jackson-datatype-jsr310-2.13.2.jar
-joda-time-2.10.14.jar
\ No newline at end of file
+jetty-xml-9.4.47.v20220610.jar
+jetty-http-9.4.47.v20220610.jar
+jetty-io-9.4.47.v20220610.jar
+jetty-util-ajax-9.4.47.v20220610.jar
+jetty-util-9.4.47.v20220610.jar
diff --git a/geode-book/master_middleman/source/subnavs/geode-subnav.erb b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
index c88b7fd..018b621 100644
--- a/geode-book/master_middleman/source/subnavs/geode-subnav.erb
+++ b/geode-book/master_middleman/source/subnavs/geode-subnav.erb
@@ -39,6 +39,9 @@
                                 <a href="/docs/guide/<%=vars.product_version_nodot%>/getting_started/system_requirements/host_machine.html">Host Machine Requirements</a>
                             </li>
                             <li>
+                                <a href="/docs/guide/<%=vars.product_version_nodot%>/getting_started/system_requirements/java_support.html">Java Support</a>
+                            </li>
+                            <li>
                                 <a href="/docs/guide/<%=vars.product_version_nodot%>/getting_started/installation/install_standalone.html">How to Install</a>
                             </li>
                             <li>
@@ -672,7 +675,7 @@
                         <a href="/docs/guide/<%=vars.product_version_nodot%>/managing/monitor_tune/chapter_overview.html">Performance Tuning and Configuration</a>
                         <ul>
                             <li>
-                                <a href="/docs/guide/<%=vars.product_version_nodot%>/managing/monitor_tune/disabling_tcp_syn_cookies.html">Disabling TCP SYN Cookies</a>
+                                <a href="/docs/guide/<%=vars.product_version_nodot%>/managing/monitor_tune/disabling_tcp_syn_cookies.html">Disable TCP SYN Cookies</a>
                             </li>
                             <li>
                                 <a href="/docs/guide/<%=vars.product_version_nodot%>/managing/monitor_tune/performance_on_vsphere.html">Improving Performance on vSphere</a>
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
deleted file mode 100644
index a76a715..0000000
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
+++ /dev/null
@@ -1,1117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.internal.cache.execute;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.logging.log4j.Logger;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.UseParametersRunnerFactory;
-
-import org.apache.geode.cache.CacheClosedException;
-import org.apache.geode.cache.Region;
-import org.apache.geode.cache.execute.Execution;
-import org.apache.geode.cache.execute.Function;
-import org.apache.geode.cache.execute.FunctionAdapter;
-import org.apache.geode.cache.execute.FunctionContext;
-import org.apache.geode.cache.execute.FunctionException;
-import org.apache.geode.cache.execute.FunctionInvocationTargetException;
-import org.apache.geode.cache.execute.FunctionService;
-import org.apache.geode.cache.execute.ResultCollector;
-import org.apache.geode.cache.execute.ResultSender;
-import org.apache.geode.distributed.DistributedSystem;
-import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.internal.cache.functions.TestFunction;
-import org.apache.geode.logging.internal.log4j.api.LogService;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.AsyncInvocation;
-import org.apache.geode.test.dunit.IgnoredException;
-import org.apache.geode.test.dunit.SerializableRunnableIF;
-import org.apache.geode.test.dunit.ThreadUtils;
-import org.apache.geode.test.dunit.Wait;
-import org.apache.geode.test.dunit.WaitCriterion;
-import org.apache.geode.test.junit.categories.ClientServerTest;
-import org.apache.geode.test.junit.categories.FunctionServiceTest;
-import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
-
-@Category({ClientServerTest.class, FunctionServiceTest.class})
-@RunWith(Parameterized.class)
-@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
-public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-    extends PRClientServerTestBase {
-
-  private static final Logger logger = LogService.getLogger();
-
-  private static final String TEST_FUNCTION7 = TestFunction.TEST_FUNCTION7;
-
-  private static final String TEST_FUNCTION2 = TestFunction.TEST_FUNCTION2;
-
-  private Boolean isByName = null;
-
-  private static final int retryCount = 0;
-
-  public PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest() {
-    super();
-  }
-
-  /*
-   * Execution of the function on server with
-   */
-  @Test
-  public void testServerAllKeyExecution_byInstance() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.FALSE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverAllKeyExecution(isByName));
-  }
-
-  /*
-   * Execution of the function on server with
-   */
-  @Test
-  public void testServerGetAllFunction() {
-    createScenario();
-    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::getAll);
-  }
-
-  /*
-   * Execution of the function on server with
-   */
-  @Test
-  public void testServerPutAllFunction() {
-    createScenario();
-    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::putAll);
-  }
-
-  /*
-   * Execution of the function on server with single key as the routing object and using the name of
-   * the function
-   */
-  @Test
-  public void testServerSingleKeyExecution_byName() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverSingleKeyExecution(isByName));
-  }
-
-  /*
-   * Execution of the function on server with single key as the routing. Function throws the
-   * FunctionInvocationTargetException. As this is the case of HA then system should retry the
-   * function execution. After 5th attempt function will send Boolean as last result.
-   */
-  @Test
-  public void testserverSingleKeyExecution_FunctionInvocationTargetException() {
-    createScenario();
-    client.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::serverSingleKeyExecution_FunctionInvocationTargetException);
-  }
-
-  /*
-   * Execution of the function on server with bucket as filter
-   */
-  @Test
-  public void testBucketFilter() {
-    createScenarioForBucketFilter();
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
-    registerFunctionAtServer(function);
-
-    Set<Integer> bucketFilterSet = new HashSet<>();
-    bucketFilterSet.add(3);
-    bucketFilterSet.add(6);
-    bucketFilterSet.add(8);
-    client.invoke(() -> serverBucketFilterExecution(bucketFilterSet));
-    bucketFilterSet.clear();
-    // Test single filter
-    bucketFilterSet.add(7);
-    client.invoke(() -> serverBucketFilterExecution(bucketFilterSet));
-  }
-
-  @Test
-  public void testBucketFilterOverride() {
-    createScenarioForBucketFilter();
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
-    registerFunctionAtServer(function);
-    // test multi key filter
-    Set<Integer> bucketFilterSet = new HashSet<>();
-    bucketFilterSet.add(3);
-    bucketFilterSet.add(6);
-    bucketFilterSet.add(8);
-
-    Set<Integer> keyFilterSet = new HashSet<>();
-    keyFilterSet.add(75);
-    keyFilterSet.add(25);
-
-    client.invoke(() -> serverBucketFilterOverrideExecution(bucketFilterSet,
-        keyFilterSet));
-
-  }
-
-  @Test
-  public void testServerSingleKeyExecution_SocketTimeOut() {
-    createScenario();
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverSingleKeyExecutionSocketTimeOut(isByName));
-  }
-
-  /*
-   * Execution of the function on server with single key as the routing object and using the
-   * instance of the function
-   */
-  @Test
-  public void testServerSingleKeyExecution_byInstance() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.FALSE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverSingleKeyExecution(isByName));
-  }
-
-  /*
-   * Execution of the inline function on server with single key as the routing object
-   */
-  @Test
-  public void testServerSingleKeyExecution_byInlineFunction() {
-    createScenario();
-    client.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::serverSingleKeyExecution_Inline);
-  }
-
-  /*
-   * Execution of the function on server with set multiple keys as the routing object and using the
-   * name of the function
-   */
-  @Test
-  public void testserverMultiKeyExecution_byName() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecution(isByName));
-    server1.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::checkBucketsOnServer);
-    server2.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::checkBucketsOnServer);
-    server3.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::checkBucketsOnServer);
-  }
-
-  @Test
-  public void testserverMultiKeyExecution_SocektTimeOut() {
-    createScenario();
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecutionSocketTimeOut(isByName));
-  }
-
-  /*
-   * Execution of the inline function on server with set multiple keys as the routing object
-   */
-  @Test
-  public void testserverMultiKeyExecution_byInlineFunction() {
-    createScenario();
-    client.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::serverMultiKeyExecution_Inline);
-  }
-
-  /*
-   * Execution of the inline function on server with set multiple keys as the routing object
-   * Function throws the FunctionInvocationTargetException. As this is the case of HA then system
-   * should retry the function execution. After 5th attempt function will send Boolean as last
-   * result.
-   */
-  @Test
-  public void testserverMultiKeyExecution_FunctionInvocationTargetException() {
-    createScenario();
-    client.invoke(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::serverMultiKeyExecution_FunctionInvocationTargetException);
-  }
-
-  /*
-   * Execution of the function on server with set multiple keys as the routing object and using the
-   * name of the function
-   */
-  @Test
-  public void testserverMultiKeyExecutionNoResult_byName() {
-    createScenario();
-    Function function = new TestFunction(false, TEST_FUNCTION7);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecutionNoResult(isByName));
-  }
-
-  /*
-   * Execution of the function on server with set multiple keys as the routing object and using the
-   * instance of the function
-   */
-  @Test
-  public void testserverMultiKeyExecution_byInstance() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.FALSE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecution(isByName));
-  }
-
-  /*
-   * Ensure that the execution is limited to a single bucket put another way, that the routing logic
-   * works correctly such that there is not extra execution
-   */
-  @Test
-  public void testserverMultiKeyExecutionOnASingleBucket_byName() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.TRUE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecutionOnASingleBucket(isByName));
-  }
-
-  /*
-   * Ensure that the execution is limited to a single bucket put another way, that the routing logic
-   * works correctly such that there is not extra execution
-   */
-  @Test
-  public void testserverMultiKeyExecutionOnASingleBucket_byInstance() {
-    createScenario();
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    registerFunctionAtServer(function);
-    isByName = Boolean.FALSE;
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .serverMultiKeyExecutionOnASingleBucket(isByName));
-  }
-
-  /*
-   * Ensure that the while executing the function if the servers is down then the execution is
-   * failover to other available server
-   */
-  @Test
-  public void testServerFailoverWithTwoServerAliveHA() {
-    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
-    ArrayList commonAttributes =
-        createCommonServerAttributes("TestPartitionedRegion", null, 1, null);
-    createClientServerScenarion(commonAttributes, 20, 20, 20);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA);
-    registerFunctionAtServer(function);
-    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::stopServerHA);
-    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::stopServerHA);
-    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::putOperation);
-
-    int AsyncInvocationArrSize = 1;
-    AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
-    async[0] = client.invokeAsync(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::executeFunctionHA);
-    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::startServerHA);
-    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::startServerHA);
-    server1.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::stopServerHA);
-    client.invoke(() -> PRClientServerRegionFunctionExecutionDUnitTest
-        .verifyDeadAndLiveServers(2));
-    ThreadUtils.join(async[0], 6 * 60 * 1000);
-    if (async[0].getException() != null) {
-      Assert.fail("UnExpected Exception Occurred : ", async[0].getException());
-    }
-    List l = (List) async[0].getReturnValue();
-
-    assertEquals(2, l.size());
-  }
-
-  /*
-   * Ensure that the while executing the function if the servers is down then the execution is
-   * failover to other available server
-   */
-  @Test
-  public void testServerCacheClosedFailoverWithTwoServerAliveHA() {
-    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
-    ArrayList commonAttributes =
-        createCommonServerAttributes("TestPartitionedRegion", null, 1, null);
-    createClientServerScenarion(commonAttributes, 20, 20, 20);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA);
-    registerFunctionAtServer(function);
-    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::stopServerHA);
-    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::stopServerHA);
-    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::putOperation);
-    int AsyncInvocationArrSize = 1;
-    AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
-    async[0] = client.invokeAsync(
-        PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::executeFunctionHA);
-    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::startServerHA);
-    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::startServerHA);
-    server1.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::closeCacheHA);
-    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest
-        .verifyDeadAndLiveServers(2));
-    ThreadUtils.join(async[0], 5 * 60 * 1000);
-    if (async[0].getException() != null) {
-      Assert.fail("UnExpected Exception Occurred : ", async[0].getException());
-    }
-    List l = (List) async[0].getReturnValue();
-    assertEquals(2, l.size());
-  }
-
-  @Test
-  public void testBug40714() {
-    createScenario();
-    server1
-        .invoke(
-            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::registerFunction);
-    server1
-        .invoke(
-            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::registerFunction);
-    server1
-        .invoke(
-            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::registerFunction);
-    client
-        .invoke(
-            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest::registerFunction);
-    client.invoke(
-        PRClientServerRegionFunctionExecutionDUnitTest::FunctionExecution_Inline_Bug40714);
-  }
-
-  public static void registerFunction() {
-    FunctionService.registerFunction(new FunctionAdapter() {
-      @Override
-      public void execute(FunctionContext context) {
-        @SuppressWarnings("unchecked")
-        final ResultSender<Object> resultSender = context.getResultSender();
-        if (context.getArguments() instanceof String) {
-          resultSender.lastResult("Failure");
-        } else if (context.getArguments() instanceof Boolean) {
-          resultSender.lastResult(Boolean.FALSE);
-        }
-      }
-
-      @Override
-      public String getId() {
-        return "Function";
-      }
-
-      @Override
-      public boolean hasResult() {
-        return true;
-      }
-    });
-  }
-
-  public static void verifyDeadAndLiveServers(final Integer expectedLiveServers) {
-    WaitCriterion wc = new WaitCriterion() {
-      String excuse;
-
-      @Override
-      public boolean done() {
-        int sz = pool.getConnectedServerCount();
-        logger.info("Checking for the Live Servers : Expected  : " + expectedLiveServers
-            + " Available :" + sz);
-        if (sz == expectedLiveServers) {
-          return true;
-        }
-        excuse = "Expected " + expectedLiveServers + " but found " + sz;
-        return false;
-      }
-
-      @Override
-      public String description() {
-        return excuse;
-      }
-    };
-    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
-  }
-
-  public static void executeFunction() {
-
-    Region region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 10); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      ResultCollector rc1 =
-          dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
-
-      HashMap resultMap = ((HashMap) rc1.getResult());
-      assertEquals(3, resultMap.size());
-
-      for (Object o : resultMap.entrySet()) {
-        Map.Entry entry = (Map.Entry) o;
-        ArrayList resultListForMember = (ArrayList) entry.getValue();
-
-        for (Object result : resultListForMember) {
-          assertEquals(Boolean.TRUE, result);
-        }
-      }
-    } catch (Exception e) {
-      logger.info("Got an exception : " + e.getMessage());
-      assertTrue(e instanceof CacheClosedException);
-    }
-  }
-
-  private static Object executeFunctionHA() {
-    Region region = cache.getRegion(PartitionedRegionName);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 10); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    ResultCollector rc1 =
-        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
-    List l = ((List) rc1.getResult());
-    logger.info("Result size : " + l.size());
-    return l;
-  }
-
-  private static void putOperation() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 10); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    int j = 0;
-    for (String s : testKeysSet) {
-      Integer val = j++;
-      region.put(s, val);
-    }
-  }
-
-  private void createScenario() {
-    ArrayList commonAttributes =
-        createCommonServerAttributes("TestPartitionedRegion", null, 0, null);
-    createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20);
-  }
-
-  private void createScenarioForBucketFilter() {
-    ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion",
-        new BucketFilterPRResolver(), 0, null);
-    createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20);
-  }
-
-  private static void checkBucketsOnServer() {
-    PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName);
-    HashMap localBucket2RegionMap = (HashMap) region.getDataStore().getSizeLocally();
-    logger.info(
-        "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size());
-    Set entrySet = localBucket2RegionMap.entrySet();
-    assertNotNull(entrySet);
-  }
-
-  private static void serverAllKeyExecution(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets / 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      int j = 0;
-      HashSet<Integer> origVals = new HashSet<>();
-      for (String item : testKeysSet) {
-        Integer val = j++;
-        origVals.add(val);
-        region.put(item, val);
-      }
-      ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName);
-      List resultList = (List) rc1.getResult();
-      logger.info("Result size : " + resultList.size());
-      logger.info("Result are SSSS : " + resultList);
-      assertEquals(3, resultList.size());
-
-      for (Object result : resultList) {
-        assertEquals(Boolean.TRUE, result);
-      }
-      ResultCollector rc2 = executeOnAll(dataSet, testKeysSet, function, isByName);
-      List l2 = ((List) rc2.getResult());
-      assertEquals(3, l2.size());
-      HashSet<Integer> foundVals = new HashSet<>();
-      for (Object value : l2) {
-        ArrayList subL = (ArrayList) (value);
-        assertTrue(subL.size() > 0);
-        for (Object o : subL) {
-          assertTrue(foundVals.add((Integer) o));
-        }
-      }
-      assertEquals(origVals, foundVals);
-
-    } catch (Exception e) {
-      Assert.fail("Test failed after the put operation", e);
-
-    }
-  }
-
-  public static void getAll() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final List<String> testKeysList = new ArrayList<>();
-    for (int i = (totalNumBuckets * 3); i > 0; i--) {
-      testKeysList.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    try {
-      int j = 0;
-      Map<String, Integer> origVals = new HashMap<>();
-      for (String key : testKeysList) {
-        Integer val = j++;
-        origVals.put(key, val);
-        region.put(key, val);
-      }
-      Map resultMap = region.getAll(testKeysList);
-      assertEquals(resultMap, origVals);
-      Wait.pause(2000);
-      Map secondResultMap = region.getAll(testKeysList);
-      assertEquals(secondResultMap, origVals);
-
-    } catch (Exception e) {
-      Assert.fail("Test failed after the put operation", e);
-
-    }
-  }
-
-  public static void putAll() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final List<String> testKeysList = new ArrayList<>();
-    for (int i = (totalNumBuckets * 3); i > 0; i--) {
-      testKeysList.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    try {
-      int j = 0;
-      Map<String, Integer> origVals = new HashMap<>();
-      for (String key : testKeysList) {
-        Integer val = j++;
-        origVals.put(key, val);
-        region.put(key, val);
-      }
-      Map resultMap = region.getAll(testKeysList);
-      assertEquals(resultMap, origVals);
-      Wait.pause(2000);
-      Map secondResultMap = region.getAll(testKeysList);
-      assertEquals(secondResultMap, origVals);
-
-    } catch (Exception e) {
-      Assert.fail("Test failed after the put operation", e);
-
-    }
-  }
-
-  private static void serverMultiKeyExecutionOnASingleBucket(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    int j = 0;
-    for (String value : testKeysSet) {
-      Integer val = j++;
-      region.put(value, val);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    for (String o : testKeysSet) {
-      try {
-        Set<String> singleKeySet = Collections.singleton(o);
-        Function function = new TestFunction(true, TEST_FUNCTION2);
-        FunctionService.registerFunction(function);
-        Execution dataSet = FunctionService.onRegion(region);
-        ResultCollector rc1 = execute(dataSet, singleKeySet, Boolean.TRUE, function, isByName);
-        List l = ((List) rc1.getResult());
-        assertEquals(1, l.size());
-
-        ResultCollector rc2 =
-            execute(dataSet, singleKeySet, new HashSet<>(singleKeySet), function, isByName);
-        List l2 = ((List) rc2.getResult());
-
-        assertEquals(1, l2.size());
-        List subList = (List) l2.iterator().next();
-        assertEquals(1, subList.size());
-        assertEquals(region.get(singleKeySet.iterator().next()), subList.iterator().next());
-      } catch (Exception expected) {
-        logger.info("Exception : " + expected.getMessage());
-        expected.printStackTrace();
-        fail("Test failed after the put operation");
-      }
-    }
-  }
-
-  private static void serverMultiKeyExecution(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      int j = 0;
-      HashSet<Integer> origVals = new HashSet<>();
-      for (String element : testKeysSet) {
-        Integer val = j++;
-        origVals.add(val);
-        region.put(element, val);
-      }
-      ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-      List l = ((List) rc1.getResult());
-      logger.info("Result size : " + l.size());
-      assertEquals(3, l.size());
-      for (Object item : l) {
-        assertEquals(Boolean.TRUE, item);
-      }
-
-      ResultCollector rc2 = execute(dataSet, testKeysSet, testKeysSet, function, isByName);
-      List l2 = ((List) rc2.getResult());
-      assertEquals(3, l2.size());
-      HashSet<Integer> foundVals = new HashSet<>();
-      for (Object value : l2) {
-        ArrayList subL = (ArrayList) value;
-        assertTrue(subL.size() > 0);
-        for (Object o : subL) {
-          assertTrue(foundVals.add((Integer) o));
-        }
-      }
-      assertEquals(origVals, foundVals);
-
-    } catch (Exception e) {
-      Assert.fail("Test failed after the put operation", e);
-
-    }
-  }
-
-
-  private static void serverMultiKeyExecutionSocketTimeOut(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      int j = 0;
-      for (String value : testKeysSet) {
-        Integer val = j++;
-        region.put(value, val);
-      }
-      ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-      List l = ((List) rc1.getResult());
-      logger.info("Result size : " + l.size());
-      assertEquals(3, l.size());
-      for (Object o : l) {
-        assertEquals(Boolean.TRUE, o);
-      }
-
-    } catch (Exception e) {
-      Assert.fail("Test failed after the function execution", e);
-
-    }
-  }
-
-  private static void serverSingleKeyExecutionSocketTimeOut(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final String testKey = "execKey";
-    final Set<String> testKeysSet = new HashSet<>();
-    testKeysSet.add(testKey);
-    DistributedSystem.setThreadsSocketPolicy(false);
-
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-
-    region.put(testKey, 1);
-    try {
-      ResultCollector rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-      assertEquals(Boolean.TRUE, ((List) rs.getResult()).get(0));
-
-      ResultCollector rs2 = execute(dataSet, testKeysSet, testKey, function, isByName);
-      assertEquals(testKey, ((List) rs2.getResult()).get(0));
-
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      logger.info("Exception : ", ex);
-      Assert.fail("Test failed after the put operation", ex);
-    }
-  }
-
-  private static void serverMultiKeyExecution_Inline() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      int j = 0;
-      for (String value : testKeysSet) {
-        Integer val = j++;
-        region.put(value, val);
-      }
-      ResultCollector rc1 =
-          dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
-            @Override
-            public void execute(FunctionContext context) {
-              @SuppressWarnings("unchecked")
-              final ResultSender<Object> resultSender = context.getResultSender();
-              if (context.getArguments() instanceof String) {
-                resultSender.lastResult("Success");
-              } else if (context.getArguments() instanceof Boolean) {
-                resultSender.lastResult(Boolean.TRUE);
-              }
-            }
-
-            @Override
-            public String getId() {
-              return getClass().getName();
-            }
-
-            @Override
-            public boolean hasResult() {
-              return true;
-            }
-          });
-      List l = ((List) rc1.getResult());
-      logger.info("Result size : " + l.size());
-      assertEquals(3, l.size());
-      for (Object o : l) {
-        assertEquals(Boolean.TRUE, o);
-      }
-    } catch (Exception e) {
-      logger.info("Exception : " + e.getMessage());
-      e.printStackTrace();
-      fail("Test failed after the put operation");
-
-    }
-  }
-
-  private static void serverMultiKeyExecution_FunctionInvocationTargetException() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Execution dataSet = FunctionService.onRegion(region);
-    int j = 0;
-    for (String o : testKeysSet) {
-      Integer val = j++;
-      region.put(o, val);
-    }
-    try {
-      ResultCollector rc1 =
-          dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
-            @Override
-            public void execute(FunctionContext context) {
-              if (context.isPossibleDuplicate()) {
-                context.getResultSender().lastResult(retryCount);
-                return;
-              }
-              if (context.getArguments() instanceof Boolean) {
-                throw new FunctionInvocationTargetException("I have been thrown from TestFunction");
-              }
-            }
-
-            @Override
-            public String getId() {
-              return getClass().getName();
-            }
-
-            @Override
-            public boolean hasResult() {
-              return true;
-            }
-          });
-
-      List list = (ArrayList) rc1.getResult();
-      assertEquals(list.get(0), 0);
-    } catch (Throwable e) {
-      e.printStackTrace();
-      Assert.fail("This is not expected Exception", e);
-    }
-
-  }
-
-  private static void serverMultiKeyExecutionNoResult(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final HashSet<String> testKeysSet = new HashSet<>();
-    for (int i = (totalNumBuckets * 2); i > 0; i--) {
-      testKeysSet.add("execKey-" + i);
-    }
-    DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(false, TEST_FUNCTION7);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      String msg = "<ExpectedException action=add>" + "FunctionException" + "</ExpectedException>";
-      cache.getLogger().info(msg);
-      int j = 0;
-      for (String o : testKeysSet) {
-        Integer val = j++;
-        region.put(o, val);
-      }
-      ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-      rc1.getResult();
-      Thread.sleep(20000);
-      fail("Test failed after the put operation");
-    } catch (FunctionException expected) {
-      expected.printStackTrace();
-      logger.info("Exception : " + expected.getMessage());
-      assertTrue(expected.getMessage()
-          .startsWith((String.format("Cannot %s result as the Function#hasResult() is false",
-              "return any"))));
-    } catch (Exception notexpected) {
-      Assert.fail("Test failed during execute or sleeping", notexpected);
-    } finally {
-      cache.getLogger()
-          .info("<ExpectedException action=remove>" + "FunctionException" + "</ExpectedException>");
-    }
-  }
-
-  private static void serverSingleKeyExecution(Boolean isByName) {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final String testKey = "execKey";
-    final Set<String> testKeysSet = new HashSet<>();
-    testKeysSet.add(testKey);
-    DistributedSystem.setThreadsSocketPolicy(false);
-
-    Function function = new TestFunction(true, TEST_FUNCTION2);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-    } catch (Exception expected) {
-      assertTrue(expected.getMessage().contains("No target node found for KEY = " + testKey)
-          || expected.getMessage().startsWith("Server could not send the reply")
-          || expected.getMessage().startsWith("Unexpected exception during"));
-    }
-
-    region.put(testKey, 1);
-    try {
-      ResultCollector rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
-      assertEquals(Boolean.TRUE, ((List) rs.getResult()).get(0));
-
-      ResultCollector rs2 = execute(dataSet, testKeysSet, testKey, function, isByName);
-      assertEquals(1, ((List) rs2.getResult()).get(0));
-
-      HashMap<String, Integer> putData = new HashMap<>();
-      putData.put(testKey + "1", 2);
-      putData.put(testKey + "2", 3);
-
-      ResultCollector rs1 = execute(dataSet, testKeysSet, putData, function, isByName);
-      assertEquals(Boolean.TRUE, ((List) rs1.getResult()).get(0));
-
-      assertEquals((Integer) 2, region.get(testKey + "1"));
-      assertEquals((Integer) 3, region.get(testKey + "2"));
-
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      logger.info("Exception : ", ex);
-      Assert.fail("Test failed after the put operation", ex);
-    }
-  }
-
-  private static void serverSingleKeyExecution_FunctionInvocationTargetException() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final String testKey = "execKey";
-    final Set<String> testKeysSet = new HashSet<>();
-    testKeysSet.add(testKey);
-    DistributedSystem.setThreadsSocketPolicy(false);
-
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION);
-    FunctionService.registerFunction(function);
-    Execution dataSet = FunctionService.onRegion(region);
-
-    region.put(testKey, 1);
-    try {
-      ResultCollector rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, false);
-      ArrayList list = (ArrayList) rs.getResult();
-      assertTrue(((Integer) list.get(0)) >= 5);
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      Assert.fail("This is not expected Exception", ex);
-    }
-  }
-
-  private static void serverSingleKeyExecution_Inline() {
-    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
-    final String testKey = "execKey";
-    final Set<String> testKeysSet = new HashSet<>();
-    testKeysSet.add(testKey);
-    DistributedSystem.setThreadsSocketPolicy(false);
-
-    Execution dataSet = FunctionService.onRegion(region);
-    try {
-      cache.getLogger()
-          .info("<ExpectedException action=add>" + "No target node found for KEY = "
-              + "|Server could not send the reply" + "|Unexpected exception during"
-              + "</ExpectedException>");
-      dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
-        @Override
-        public void execute(FunctionContext context) {
-          @SuppressWarnings("unchecked")
-          final ResultSender<Object> resultSender = context.getResultSender();
-          if (context.getArguments() instanceof String) {
-            resultSender.lastResult("Success");
-          }
-          resultSender.lastResult("Failure");
-        }
-
-        @Override
-        public String getId() {
-          return getClass().getName();
-        }
-
-        @Override
-        public boolean hasResult() {
-          return true;
-        }
-      });
-    } catch (Exception expected) {
-      logger.debug("Exception occurred : " + expected.getMessage());
-      assertTrue(expected.getMessage().contains("No target node found for KEY = " + testKey)
-          || expected.getMessage().startsWith("Server could not send the reply")
-          || expected.getMessage().startsWith("Unexpected exception during"));
-    } finally {
-      cache.getLogger()
-          .info("<ExpectedException action=remove>" + "No target node found for KEY = "
-              + "|Server could not send the reply" + "|Unexpected exception during"
-              + "</ExpectedException>");
-    }
-
-    region.put(testKey, 1);
-    try {
-      ResultCollector rs =
-          dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
-            @Override
-            public void execute(FunctionContext context) {
-              @SuppressWarnings("unchecked")
-              final ResultSender<Object> resultSender = context.getResultSender();
-              if (context.getArguments() instanceof String) {
-                resultSender.lastResult("Success");
-              } else {
-                resultSender.lastResult("Failure");
-              }
-            }
-
-            @Override
-            public String getId() {
-              return getClass().getName();
-            }
-
-            @Override
-            public boolean hasResult() {
-              return true;
-            }
-          });
-      assertEquals("Failure", ((List) rs.getResult()).get(0));
-
-      ResultCollector rs2 =
-          dataSet.withFilter(testKeysSet).setArguments(testKey).execute(new FunctionAdapter() {
-            @Override
-            public void execute(FunctionContext context) {
-              @SuppressWarnings("unchecked")
-              final ResultSender<Object> resultSender = context.getResultSender();
-              if (context.getArguments() instanceof String) {
-                resultSender.lastResult("Success");
-              } else {
-                resultSender.lastResult("Failure");
-              }
-            }
-
-            @Override
-            public String getId() {
-              return getClass().getName();
-            }
-
-            @Override
-            public boolean hasResult() {
-              return true;
-            }
-          });
-      assertEquals("Success", ((List) rs2.getResult()).get(0));
-
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      logger.info("Exception : ", ex);
-      Assert.fail("Test failed after the put operation", ex);
-    }
-  }
-
-  private static ResultCollector execute(Execution dataSet, Set testKeysSet, Serializable args,
-      Function function, Boolean isByName) {
-    if (isByName) {// by name
-      return dataSet.withFilter(testKeysSet).setArguments(args).execute(function.getId());
-    } else { // By Instance
-      return dataSet.withFilter(testKeysSet).setArguments(args).execute(function);
-    }
-  }
-
-  private static ResultCollector executeOnAll(Execution dataSet, Serializable args,
-      Function function, Boolean isByName) {
-    if (isByName) {// by name
-      return dataSet.setArguments(args).execute(function.getId());
-    } else { // By Instance
-      return dataSet.setArguments(args).execute(function);
-    }
-  }
-}
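Note: the file deleted above is re-created below under the *DistributedTest naming convention and modernized, as its new imports show, from JUnit asserts and WaitCriterion polling to AssertJ and GeodeAwaitility. As a rough sketch, the verifyDeadAndLiveServers helper above translates to the Awaitility style the new file imports as follows (assuming the same inherited static pool field from PRClientServerTestBase):

    import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
    import static org.assertj.core.api.Assertions.assertThat;

    // Sketch only: the old WaitCriterion poll re-expressed with GeodeAwaitility.
    public static void verifyDeadAndLiveServers(final int expectedLiveServers) {
      await().untilAsserted(() -> assertThat(pool.getConnectedServerCount())
          .as("connected server count")
          .isEqualTo(expectedLiveServers));
    }
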
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java
new file mode 100644
index 0000000..1c66b3d
--- /dev/null
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest.java
@@ -0,0 +1,1096 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.execute;
+
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+import org.apache.logging.log4j.Logger;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.UseParametersRunnerFactory;
+
+import org.apache.geode.cache.CacheClosedException;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.client.ServerConnectivityException;
+import org.apache.geode.cache.execute.Execution;
+import org.apache.geode.cache.execute.Function;
+import org.apache.geode.cache.execute.FunctionAdapter;
+import org.apache.geode.cache.execute.FunctionContext;
+import org.apache.geode.cache.execute.FunctionException;
+import org.apache.geode.cache.execute.FunctionInvocationTargetException;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.execute.ResultSender;
+import org.apache.geode.distributed.DistributedSystem;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.functions.TestFunction;
+import org.apache.geode.logging.internal.log4j.api.LogService;
+import org.apache.geode.test.awaitility.GeodeAwaitility;
+import org.apache.geode.test.dunit.AsyncInvocation;
+import org.apache.geode.test.dunit.IgnoredException;
+import org.apache.geode.test.dunit.SerializableRunnableIF;
+import org.apache.geode.test.dunit.Wait;
+import org.apache.geode.test.dunit.WaitCriterion;
+import org.apache.geode.test.junit.categories.ClientServerTest;
+import org.apache.geode.test.junit.categories.FunctionServiceTest;
+import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
+
+@Category({ClientServerTest.class, FunctionServiceTest.class})
+@RunWith(Parameterized.class)
+@UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
+public class PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+    extends PRClientServerTestBase {
+
+  private static final Logger logger = LogService.getLogger();
+
+  private static final String TEST_FUNCTION7 = TestFunction.TEST_FUNCTION7;
+
+  private static final String TEST_FUNCTION2 = TestFunction.TEST_FUNCTION2;
+
+  private Boolean isByName = null;
+
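+  // Sentinel result the inline function sends back when it detects a retried
+  // (isPossibleDuplicate) execution; the FunctionInvocationTargetException test asserts on it.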
+  private static final int retryCount = 0;
+
+  public PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest() {
+    super();
+  }
+
+  /*
+   * Execution of the function on the server across all keys (no filter), using the instance of
+   * the function
+   */
+  @Test
+  public void testServerAllKeyExecution_byInstance() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.FALSE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverAllKeyExecution(isByName));
+  }
+
+  /*
+   * Execution of Region.getAll from a client on the partitioned region
+   */
+  @Test
+  public void testServerGetAllFunction() {
+    createScenario();
+    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::getAll);
+  }
+
+  /*
+   * Execution of puts followed by Region.getAll from a client on the partitioned region
+   */
+  @Test
+  public void testServerPutAllFunction() {
+    createScenario();
+    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::putAll);
+  }
+
+  /*
+   * Execution of the function on the server with a single key as the routing object, using the
+   * name of the function
+   */
+  @Test
+  public void testServerSingleKeyExecution_byName() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverSingleKeyExecution(isByName));
+  }
+
+  /*
+   * Execution of the function on the server with a single key as the routing object. The function
+   * throws a FunctionInvocationTargetException. Since this is an HA case, the system should retry
+   * the function execution. After the 5th attempt the function sends a Boolean as the last result.
+   */
+  @Test
+  public void testserverSingleKeyExecution_FunctionInvocationTargetException() {
+    createScenario();
+    client.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::serverSingleKeyExecution_FunctionInvocationTargetException);
+  }
+
+  /*
+   * Execution of the function on server with bucket as filter
+   */
+  @Test
+  public void testBucketFilter() {
+    createScenarioForBucketFilter();
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
+    registerFunctionAtServer(function);
+
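+    // Bucket ids used as the routing filter for the execution.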
+    Set<Integer> bucketFilterSet = new HashSet<>();
+    bucketFilterSet.add(3);
+    bucketFilterSet.add(6);
+    bucketFilterSet.add(8);
+    client.invoke(() -> serverBucketFilterExecution(bucketFilterSet));
+    bucketFilterSet.clear();
+    // Test single filter
+    bucketFilterSet.add(7);
+    client.invoke(() -> serverBucketFilterExecution(bucketFilterSet));
+  }
+
+  @Test
+  public void testBucketFilterOverride() {
+    createScenarioForBucketFilter();
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
+    registerFunctionAtServer(function);
+    // test multi key filter
+    Set<Integer> bucketFilterSet = new HashSet<>();
+    bucketFilterSet.add(3);
+    bucketFilterSet.add(6);
+    bucketFilterSet.add(8);
+
+    Set<Integer> keyFilterSet = new HashSet<>();
+    keyFilterSet.add(75);
+    keyFilterSet.add(25);
+
+    client.invoke(() -> serverBucketFilterOverrideExecution(bucketFilterSet,
+        keyFilterSet));
+
+  }
+
+  @Test
+  public void testServerSingleKeyExecution_SocketTimeOut() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverSingleKeyExecutionSocketTimeOut(isByName));
+  }
+
+  /*
+   * Execution of the function on the server with a single key as the routing object, using the
+   * instance of the function
+   */
+  @Test
+  public void testServerSingleKeyExecution_byInstance() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.FALSE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverSingleKeyExecution(isByName));
+  }
+
+  /*
+   * Execution of the inline function on the server with a single key as the routing object
+   */
+  @Test
+  public void testServerSingleKeyExecution_byInlineFunction() {
+    createScenario();
+    client.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::serverSingleKeyExecution_Inline);
+  }
+
+  /*
+   * Execution of the function on the server with a set of multiple keys as the routing object,
+   * using the name of the function
+   */
+  @Test
+  public void testserverMultiKeyExecution_byName() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecution(isByName));
+    server1.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::checkBucketsOnServer);
+    server2.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::checkBucketsOnServer);
+    server3.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::checkBucketsOnServer);
+  }
+
+  @Test
+  public void testserverMultiKeyExecution_SocketTimeOut() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecutionSocketTimeOut(isByName));
+  }
+
+  /*
+   * Execution of the inline function on the server with a set of multiple keys as the routing
+   * object
+   */
+  @Test
+  public void testserverMultiKeyExecution_byInlineFunction() {
+    createScenario();
+    client.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::serverMultiKeyExecution_Inline);
+  }
+
+  /*
+   * Execution of the inline function on the server with a set of multiple keys as the routing
+   * object. The function throws a FunctionInvocationTargetException. Since this is an HA case, the
+   * system should retry the function execution. After the 5th attempt the function sends a Boolean
+   * as the last result.
+   */
+  @Test
+  public void testserverMultiKeyExecution_FunctionInvocationTargetException() {
+    createScenario();
+    client.invoke(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::serverMultiKeyExecution_FunctionInvocationTargetException);
+  }
+
+  /*
+   * Execution of a function that has no result on the server with a set of multiple keys as the
+   * routing object, using the name of the function
+   */
+  @Test
+  public void testserverMultiKeyExecutionNoResult_byName() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(false, TEST_FUNCTION7);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecutionNoResult(isByName));
+  }
+
+  /*
+   * Execution of the function on the server with a set of multiple keys as the routing object,
+   * using the instance of the function
+   */
+  @Test
+  public void testserverMultiKeyExecution_byInstance() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.FALSE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecution(isByName));
+  }
+
+  /*
+   * Ensure that the execution is limited to a single bucket; put another way, that the routing
+   * logic works correctly such that there is no extra execution
+   */
+  @Test
+  public void testserverMultiKeyExecutionOnASingleBucket_byName() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.TRUE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecutionOnASingleBucket(isByName));
+  }
+
+  /*
+   * Ensure that the execution is limited to a single bucket; put another way, that the routing
+   * logic works correctly such that there is no extra execution
+   */
+  @Test
+  public void testserverMultiKeyExecutionOnASingleBucket_byInstance() {
+    createScenario();
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    registerFunctionAtServer(function);
+    isByName = Boolean.FALSE;
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .serverMultiKeyExecutionOnASingleBucket(isByName));
+  }
+
+  /*
+   * Ensure that if a server goes down while the function is executing, the execution fails over to
+   * another available server
+   */
+  @Test
+  public void testServerFailoverWithTwoServerAliveHA() throws InterruptedException {
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
+    ArrayList<Object> commonAttributes =
+        createCommonServerAttributes("TestPartitionedRegion", null, 1, null);
+    createClientServerScenarion(commonAttributes, 20, 20, 20);
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_HA);
+    registerFunctionAtServer(function);
+    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::stopServerHA);
+    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::stopServerHA);
+    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::putOperation);
+
+    AsyncInvocation<?>[] async = new AsyncInvocation<?>[1];
+    async[0] = client.invokeAsync(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::executeFunctionHA);
+    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::startServerHA);
+    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::startServerHA);
+    server1.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::stopServerHA);
+    client.invoke(() -> PRClientServerRegionFunctionExecutionDUnitTest
+        .verifyDeadAndLiveServers(2));
+
+    List<?> l = (List<?>) async[0].get();
+
+    assertThat(l).hasSize(2);
+  }
+
+  /*
+   * Ensure that if a server goes down while the function is executing, the execution fails over to
+   * another available server
+   */
+  @Test
+  public void testServerCacheClosedFailoverWithTwoServerAliveHA() throws InterruptedException {
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
+    ArrayList<Object> commonAttributes =
+        createCommonServerAttributes("TestPartitionedRegion", null, 1, null);
+    createClientServerScenarion(commonAttributes, 20, 20, 20);
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_HA);
+    registerFunctionAtServer(function);
+    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::stopServerHA);
+    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::stopServerHA);
+    client.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::putOperation);
+    AsyncInvocation<?>[] async = new AsyncInvocation<?>[1];
+    async[0] = client.invokeAsync(
+        PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::executeFunctionHA);
+    server2.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::startServerHA);
+    server3.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::startServerHA);
+    server1.invoke(PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::closeCacheHA);
+    client.invoke(() -> PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest
+        .verifyDeadAndLiveServers(2));
+
+    List<?> l = (List<?>) async[0].get();
+    assertThat(l).hasSize(2);
+  }
+
+  @Test
+  public void testBug40714() {
+    createScenario();
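+    // Register the same function several times in a row; re-registration should be
+    // harmless and simply replace the earlier registration (the regression under test).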
+    server1
+        .invoke(
+            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::registerFunction);
+    server1
+        .invoke(
+            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::registerFunction);
+    server1
+        .invoke(
+            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::registerFunction);
+    client
+        .invoke(
+            (SerializableRunnableIF) PRClientServerRegionFunctionExecutionNoSingleHopDistributedTest::registerFunction);
+    client.invoke(
+        PRClientServerRegionFunctionExecutionDUnitTest::FunctionExecution_Inline_Bug40714);
+  }
+
+  /**
+   * This test case verifies that if the execution of a function handled
+   * by a Function Execution thread times out at the client, the ServerConnection
+   * thread will eventually be released.
+   * In order to test this, a slow function is executed by a client
+   * with a small timeout a number of times equal to the number of servers
+   * in the cluster multiplied by the maximum number of threads configured.
+   * After the function executions have timed out, another request is
+   * sent by the client to any server, and it should be served in a timely manner.
+   * If the ServerConnection threads had not been released, this new
+   * request would never be served, because there would be no ServerConnection
+   * threads available, and the test case would time out.
+   */
+  @Test
+  public void testClientFunctionExecutionTimingOutDoesNotLeaveServerConnectionThreadsHanged() {
+    // Set the client connect-timeout to a very high value so that, if there are no
+    // ServerConnection threads available, the test times out before the client does.
+    int connectTimeout = (int) (GeodeAwaitility.getTimeout().toMillis() * 2);
+    int maxThreads = 2;
+    createScenarioWithClientConnectTimeout(connectTimeout, maxThreads);
+
+    // The function must be executed a number of times equal to the number of
+    // servers (3 in this scenario) * max-threads, so that every ServerConnection
+    // thread is occupied by one timed-out execution.
+    int executions = (3 * maxThreads);
+
+    // functionTimeoutSecs should be lower than the
+    // time taken by the slow function to return all
+    // the results
+    int functionTimeoutSecs = 2;
+
+    Function<?> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_SLOW);
+    registerFunctionAtServer(function);
+
+    // Run the function that will time out at the client
+    // the specified number of times.
+    IntStream.range(0, executions)
+        .forEach(i -> assertThatThrownBy(() -> client
+            .invoke(() -> executeSlowFunctionOnRegionNoFilter(function, PartitionedRegionName,
+                functionTimeoutSecs)))
+                    .getCause().getCause().isInstanceOf(ServerConnectivityException.class));
+
+    // Make sure that the get returns in a timely manner. If it hangs, it means
+    // that there are no threads available in the servers to handle the
+    // request because they were left hung by the previous function
+    // executions.
+    await().until(() -> {
+      client.invoke(() -> executeGet(PartitionedRegionName, "key"));
+      return true;
+    });
+  }
+
+  private Object executeGet(String regionName, Object key) {
+    Region<?, ?> region = cache.getRegion(regionName);
+    return region.get(key);
+  }
+
+  private Object executeSlowFunctionOnRegionNoFilter(Function<?> function, String regionName,
+      int functionTimeoutSecs) {
+    FunctionService.registerFunction(function);
+    Region<?, ?> region = cache.getRegion(regionName);
+
+    Execution execution = FunctionService.onRegion(region);
+
+    Object[] args = {Boolean.TRUE};
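+    // The (functionId, timeout, unit) execute overload bounds how long the client waits
+    // for results; the slow function outlasts it, so the call times out on the client.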
+    return execution.setArguments(args).execute(function.getId(), functionTimeoutSecs,
+        TimeUnit.SECONDS).getResult();
+  }
+
+
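+  // Inline function used by testBug40714; the result it sends depends on the argument type.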
+  public static void registerFunction() {
+    FunctionService.registerFunction(new FunctionAdapter() {
+      @Override
+      public void execute(FunctionContext context) {
+        @SuppressWarnings("unchecked")
+        final ResultSender<Object> resultSender = context.getResultSender();
+        if (context.getArguments() instanceof String) {
+          resultSender.lastResult("Failure");
+        } else if (context.getArguments() instanceof Boolean) {
+          resultSender.lastResult(Boolean.FALSE);
+        }
+      }
+
+      @Override
+      public String getId() {
+        return "Function";
+      }
+
+      @Override
+      public boolean hasResult() {
+        return true;
+      }
+    });
+  }
+
+  public static void verifyDeadAndLiveServers(final Integer expectedLiveServers) {
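+    // Poll the client pool until the connected-server count matches the expectation,
+    // checking once per second for up to 3 minutes.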
+    WaitCriterion wc = new WaitCriterion() {
+      String excuse;
+
+      @Override
+      public boolean done() {
+        int sz = pool.getConnectedServerCount();
+        logger.info("Checking for the Live Servers : Expected  : " + expectedLiveServers
+            + " Available :" + sz);
+        if (sz == expectedLiveServers) {
+          return true;
+        }
+        excuse = "Expected " + expectedLiveServers + " but found " + sz;
+        return false;
+      }
+
+      @Override
+      public String description() {
+        return excuse;
+      }
+    };
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+  }
+
+  public static void executeFunction() {
+
+    Region<Object, Object> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 10); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
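+    // Give this thread its own sockets instead of sharing them (overrides conserve-sockets).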
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+    try {
+      ResultCollector<?, ?> rc1 =
+          dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
+
+      HashMap<?, ?> resultMap = ((HashMap<?, ?>) rc1.getResult());
+      assertThat(resultMap).hasSize(3);
+
+      for (Map.Entry<?, ?> o : resultMap.entrySet()) {
+        ArrayList<?> resultListForMember = (ArrayList<?>) o.getValue();
+
+        for (Object result : resultListForMember) {
+          assertThat(result).isEqualTo(true);
+        }
+      }
+    } catch (Exception e) {
+      logger.info("Got an exception : " + e.getMessage());
+      assertThat(e).isInstanceOf(CacheClosedException.class);
+    }
+  }
+
+  private static Object executeFunctionHA() {
+    Region<Object, Object> region = cache.getRegion(PartitionedRegionName);
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 10); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_HA);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+    ResultCollector<?, ?> rc1 =
+        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(function.getId());
+    List<?> l = ((List<?>) rc1.getResult());
+    logger.info("Result size : " + l.size());
+    return l;
+  }
+
+  private static void putOperation() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 10); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    int j = 0;
+    for (String s : testKeysSet) {
+      Integer val = j++;
+      region.put(s, val);
+    }
+  }
+
+  private void createScenario() {
+    ArrayList<Object> commonAttributes =
+        createCommonServerAttributes("TestPartitionedRegion", null, 0, null);
+    createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20);
+  }
+
+  private void createScenarioWithClientConnectTimeout(int connectTimeout, int maxThreads) {
+    ArrayList<Object> commonAttributes =
+        createCommonServerAttributes("TestPartitionedRegion", null, 0, null);
+    createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20, maxThreads, connectTimeout);
+  }
+
+
+  private void createScenarioForBucketFilter() {
+    ArrayList<Object> commonAttributes = createCommonServerAttributes("TestPartitionedRegion",
+        new BucketFilterPRResolver(), 0, null);
+    createClientServerScenarioNoSingleHop(commonAttributes, 20, 20, 20);
+  }
+
+  private static void checkBucketsOnServer() {
+    PartitionedRegion region = (PartitionedRegion) cache.getRegion(PartitionedRegionName);
+    HashMap<Integer, Integer> localBucket2RegionMap =
+        (HashMap<Integer, Integer>) region.getDataStore().getSizeLocally();
+    logger.info(
+        "Size of the " + PartitionedRegionName + " in this VM :- " + localBucket2RegionMap.size());
+    Set<Map.Entry<Integer, Integer>> entrySet = localBucket2RegionMap.entrySet();
+    assertThat(entrySet).isNotNull();
+  }
+
+  private static void serverAllKeyExecution(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets / 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    int j = 0;
+    HashSet<Integer> origVals = new HashSet<>();
+    for (String item : testKeysSet) {
+      Integer val = j++;
+      origVals.add(val);
+      region.put(item, val);
+    }
+    ResultCollector<?, ?> rc1 = executeOnAll(dataSet, Boolean.TRUE, function, isByName);
+    List<?> resultList = (List<?>) rc1.getResult();
+    assertThat(resultList).hasSize(3);
+
+    for (Object result : resultList) {
+      assertThat(result).isEqualTo(true);
+    }
+    ResultCollector<?, ?> rc2 = executeOnAll(dataSet, testKeysSet, function, isByName);
+    List<?> l2 = (List<?>) rc2.getResult();
+    assertThat(l2).hasSize(3);
+    HashSet<Integer> foundVals = new HashSet<>();
+    for (Object value : l2) {
+      List<?> subL = (List<?>) value;
+      assertThat(subL).hasSizeGreaterThan(0);
+      for (Object o : subL) {
+        assertThat(foundVals.add((Integer) o)).isTrue();
+      }
+    }
+    assertThat(foundVals).containsExactlyInAnyOrderElementsOf(origVals);
+  }
+
+  public static void getAll() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final List<String> testKeysList = new ArrayList<>();
+    for (int i = (totalNumBuckets * 3); i > 0; i--) {
+      testKeysList.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    int j = 0;
+    Map<String, Integer> origVals = new HashMap<>();
+    for (String key : testKeysList) {
+      Integer val = j++;
+      origVals.put(key, val);
+      region.put(key, val);
+    }
+    Map<String, Integer> resultMap = region.getAll(testKeysList);
+    assertThat(resultMap).containsExactlyInAnyOrderEntriesOf(origVals);
+    await().untilAsserted(
+        () -> assertThat(region.getAll(testKeysList)).containsExactlyInAnyOrderEntriesOf(origVals));
+  }
+
+  public static void putAll() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final List<String> testKeysList = new ArrayList<>();
+    for (int i = (totalNumBuckets * 3); i > 0; i--) {
+      testKeysList.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    int j = 0;
+    Map<String, Integer> origVals = new HashMap<>();
+    for (String key : testKeysList) {
+      Integer val = j++;
+      origVals.put(key, val);
+      region.put(key, val);
+    }
+    Map<String, Integer> resultMap = region.getAll(testKeysList);
+    assertThat(resultMap).containsExactlyInAnyOrderEntriesOf(origVals);
+    await().untilAsserted(
+        () -> assertThat(region.getAll(testKeysList)).containsExactlyInAnyOrderEntriesOf(origVals));
+  }
+
+  private static void serverMultiKeyExecutionOnASingleBucket(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    int j = 0;
+    for (String value : testKeysSet) {
+      Integer val = j++;
+      region.put(value, val);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    for (String key : testKeysSet) {
+      Set<String> singleKeySet = Collections.singleton(key);
+      Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+      FunctionService.registerFunction(function);
+      Execution dataSet = FunctionService.onRegion(region);
+      ResultCollector<?, ?> rc1 = execute(dataSet, singleKeySet, Boolean.TRUE, function, isByName);
+      List<?> list1 = (List<?>) rc1.getResult();
+      assertThat(list1).hasSize(1);
+
+      ResultCollector<?, ?> rc2 =
+          execute(dataSet, singleKeySet, new HashSet<>(singleKeySet), function, isByName);
+      List<?> list2 = (List<?>) rc2.getResult();
+
+      assertThat(list2).hasSize(1);
+      List<Integer> subList = (List<Integer>) list2.iterator().next();
+      assertThat(subList).hasSize(1);
+      assertThat(subList).containsOnly(region.get(singleKeySet.iterator().next()));
+    }
+  }
+
+  private static void serverMultiKeyExecution(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    int j = 0;
+    HashSet<Integer> origVals = new HashSet<>();
+    for (String element : testKeysSet) {
+      Integer val = j++;
+      origVals.add(val);
+      region.put(element, val);
+    }
+    ResultCollector<?, ?> rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+    List<?> l = (List<?>) rc1.getResult();
+    assertThat(l).hasSize(3);
+    for (Object item : l) {
+      assertThat(item).isEqualTo(true);
+    }
+
+    ResultCollector<?, ?> rc2 = execute(dataSet, testKeysSet, testKeysSet, function, isByName);
+    List<?> l2 = (List<?>) rc2.getResult();
+    assertThat(l2).hasSize(3);
+    HashSet<Integer> foundVals = new HashSet<>();
+    for (Object value : l2) {
+      List<?> subL = (List<?>) value;
+      assertThat(subL).hasSizeGreaterThan(0);
+      for (Object o : subL) {
+        assertThat(foundVals.add((Integer) o)).isTrue();
+      }
+    }
+    assertThat(foundVals).containsExactlyInAnyOrderElementsOf(origVals);
+  }
+
+
+  private static void serverMultiKeyExecutionSocketTimeOut(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    int j = 0;
+    for (String value : testKeysSet) {
+      Integer val = j++;
+      region.put(value, val);
+    }
+    ResultCollector<?, ?> rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+    List<?> l = (List<?>) rc1.getResult();
+    logger.info("Result size : " + l.size());
+    assertThat(l).hasSize(3);
+    for (Object o : l) {
+      assertThat(o).isEqualTo(true);
+    }
+  }
+
+  private static void serverSingleKeyExecutionSocketTimeOut(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final String testKey = "execKey";
+    final Set<String> testKeysSet = new HashSet<>();
+    testKeysSet.add(testKey);
+    DistributedSystem.setThreadsSocketPolicy(false);
+
+    Function<Object> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_SOCKET_TIMEOUT);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    region.put(testKey, 1);
+
+    ResultCollector<?, ?> rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+    assertThat(((List<?>) rs.getResult()).get(0)).isEqualTo(true);
+
+    ResultCollector<?, ?> rs2 = execute(dataSet, testKeysSet, testKey, function, isByName);
+    assertThat(((List<?>) rs2.getResult()).get(0)).isEqualTo(testKey);
+  }
+
+  private static void serverMultiKeyExecution_Inline() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    int j = 0;
+    for (String value : testKeysSet) {
+      Integer val = j++;
+      region.put(value, val);
+    }
+    ResultCollector<?, ?> rc1 =
+        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
+          @Override
+          public void execute(FunctionContext context) {
+            @SuppressWarnings("unchecked")
+            final ResultSender<Object> resultSender = context.getResultSender();
+            if (context.getArguments() instanceof String) {
+              resultSender.lastResult("Success");
+            } else if (context.getArguments() instanceof Boolean) {
+              resultSender.lastResult(Boolean.TRUE);
+            }
+          }
+
+          @Override
+          public String getId() {
+            return getClass().getName();
+          }
+
+          @Override
+          public boolean hasResult() {
+            return true;
+          }
+        });
+    List<?> list = (List<?>) rc1.getResult();
+    logger.info("Result size : " + list.size());
+    assertThat(list).hasSize(3);
+    for (Object item : list) {
+      assertThat(item).isEqualTo(true);
+    }
+  }
+
+  private static void serverMultiKeyExecution_FunctionInvocationTargetException() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Execution dataSet = FunctionService.onRegion(region);
+    int j = 0;
+    for (String o : testKeysSet) {
+      Integer val = j++;
+      region.put(o, val);
+    }
+    ResultCollector<?, ?> rc1 =
+        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
+          @Override
+          public void execute(FunctionContext context) {
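+            // A retried execution arrives flagged as a possible duplicate; report the
+            // retry count instead of throwing again, so the collector sees a single result.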
+            if (context.isPossibleDuplicate()) {
+              context.getResultSender().lastResult(retryCount);
+              return;
+            }
+            if (context.getArguments() instanceof Boolean) {
+              throw new FunctionInvocationTargetException("I have been thrown from TestFunction");
+            }
+          }
+
+          @Override
+          public String getId() {
+            return getClass().getName();
+          }
+
+          @Override
+          public boolean hasResult() {
+            return true;
+          }
+        });
+
+    List<?> list = (List<?>) rc1.getResult();
+    assertThat(list.get(0)).isEqualTo(0);
+  }
+
+  private static void serverMultiKeyExecutionNoResult(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final HashSet<String> testKeysSet = new HashSet<>();
+    for (int i = (totalNumBuckets * 2); i > 0; i--) {
+      testKeysSet.add("execKey-" + i);
+    }
+    DistributedSystem.setThreadsSocketPolicy(false);
+    Function<Object> function = new TestFunction<>(false, TEST_FUNCTION7);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+    try {
+      String msg = "<ExpectedException action=add>" + "FunctionException" + "</ExpectedException>";
+      cache.getLogger().info(msg);
+      int j = 0;
+      for (String o : testKeysSet) {
+        Integer val = j++;
+        region.put(o, val);
+      }
+      ResultCollector<?, ?> rc1 = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+      assertThatThrownBy(rc1::getResult).isExactlyInstanceOf(FunctionException.class)
+          .hasMessageStartingWith(
+              String.format("Cannot %s result as the Function#hasResult() is false",
+                  "return any"));
+    } finally {
+      cache.getLogger()
+          .info("<ExpectedException action=remove>" + "FunctionException" + "</ExpectedException>");
+    }
+  }
+
+  private static void serverSingleKeyExecution(Boolean isByName) {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final String testKey = "execKey";
+    final Set<String> testKeysSet = new HashSet<>();
+    testKeysSet.add(testKey);
+    DistributedSystem.setThreadsSocketPolicy(false);
+
+    Function<Object> function = new TestFunction<>(true, TEST_FUNCTION2);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+
+    region.put(testKey, 1);
+
+    ResultCollector<?, ?> rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, isByName);
+    assertThat(((List<?>) rs.getResult()).get(0)).isEqualTo(true);
+
+    ResultCollector<?, ?> rs2 = execute(dataSet, testKeysSet, testKey, function, isByName);
+    assertThat(((List<?>) rs2.getResult()).get(0)).isEqualTo(1);
+
+    HashMap<String, Integer> putData = new HashMap<>();
+    putData.put(testKey + "1", 2);
+    putData.put(testKey + "2", 3);
+
+    ResultCollector<?, ?> rs1 = execute(dataSet, testKeysSet, putData, function, isByName);
+    assertThat(((List<?>) rs1.getResult()).get(0)).isEqualTo(true);
+
+    assertThat(region.get(testKey + "1")).isEqualTo(2);
+    assertThat(region.get(testKey + "2")).isEqualTo(3);
+  }
+
+  private static void serverSingleKeyExecution_FunctionInvocationTargetException() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final String testKey = "execKey";
+    final Set<String> testKeysSet = new HashSet<>();
+    testKeysSet.add(testKey);
+    DistributedSystem.setThreadsSocketPolicy(false);
+
+    Function<Object> function =
+        new TestFunction<>(true, TestFunction.TEST_FUNCTION_REEXECUTE_EXCEPTION);
+    FunctionService.registerFunction(function);
+    Execution dataSet = FunctionService.onRegion(region);
+
+    region.put(testKey, 1);
+
+    ResultCollector<?, ?> rs = execute(dataSet, testKeysSet, Boolean.TRUE, function, false);
+    ArrayList<?> list = (ArrayList<?>) rs.getResult();
+    assertThat(((Integer) list.get(0))).isGreaterThanOrEqualTo(5);
+  }
+
+  private static void serverSingleKeyExecution_Inline() {
+    Region<String, Integer> region = cache.getRegion(PartitionedRegionName);
+    assertThat(region).isNotNull();
+    final String testKey = "execKey";
+    final Set<String> testKeysSet = new HashSet<>();
+    testKeysSet.add(testKey);
+    DistributedSystem.setThreadsSocketPolicy(false);
+
+    Execution dataSet = FunctionService.onRegion(region);
+
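+    // The first execution runs before any data has been put; its result is intentionally ignored.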
+    FunctionAdapter functionAdapter = new FunctionAdapter() {
+      @Override
+      public void execute(FunctionContext context) {
+        @SuppressWarnings("unchecked")
+        final ResultSender<Object> resultSender = context.getResultSender();
+        if (context.getArguments() instanceof String) {
+          resultSender.lastResult("Success");
+        }
+        resultSender.lastResult("Failure");
+      }
+
+      @Override
+      public String getId() {
+        return getClass().getName();
+      }
+
+      @Override
+      public boolean hasResult() {
+        return true;
+      }
+    };
+
+    dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(functionAdapter);
+
+    region.put(testKey, 1);
+
+    ResultCollector<?, ?> rs =
+        dataSet.withFilter(testKeysSet).setArguments(Boolean.TRUE).execute(new FunctionAdapter() {
+          @Override
+          public void execute(FunctionContext context) {
+            @SuppressWarnings("unchecked")
+            final ResultSender<Object> resultSender = context.getResultSender();
+            if (context.getArguments() instanceof String) {
+              resultSender.lastResult("Success");
+            } else {
+              resultSender.lastResult("Failure");
+            }
+          }
+
+          @Override
+          public String getId() {
+            return getClass().getName();
+          }
+
+          @Override
+          public boolean hasResult() {
+            return true;
+          }
+        });
+    assertThat(((List<?>) rs.getResult()).get(0)).isEqualTo("Failure");
+
+    ResultCollector<?, ?> rs2 =
+        dataSet.withFilter(testKeysSet).setArguments(testKey).execute(new FunctionAdapter() {
+          @Override
+          public void execute(FunctionContext context) {
+            @SuppressWarnings("unchecked")
+            final ResultSender<Object> resultSender = context.getResultSender();
+            if (context.getArguments() instanceof String) {
+              resultSender.lastResult("Success");
+            } else {
+              resultSender.lastResult("Failure");
+            }
+          }
+
+          @Override
+          public String getId() {
+            return getClass().getName();
+          }
+
+          @Override
+          public boolean hasResult() {
+            return true;
+          }
+        });
+
+    assertThat(((List<?>) rs2.getResult()).get(0)).isEqualTo("Success");
+
+  }
+
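+  // Dispatch helpers: execute either by registered function id or by function instance,
+  // depending on the isByName flag each test passes in.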
+  private static ResultCollector<?, ?> execute(Execution dataSet, Set<?> testKeysSet,
+      Serializable args, Function<?> function, Boolean isByName) {
+    if (isByName) { // by name
+      return dataSet.withFilter(testKeysSet).setArguments(args).execute(function.getId());
+    } else { // by instance
+      return dataSet.withFilter(testKeysSet).setArguments(args).execute(function);
+    }
+  }
+
+  private static ResultCollector<?, ?> executeOnAll(Execution dataSet, Serializable args,
+      Function<?> function, Boolean isByName) {
+    if (isByName) { // by name
+      return dataSet.setArguments(args).execute(function.getId());
+    } else { // by instance
+      return dataSet.setArguments(args).execute(function);
+    }
+  }
+}
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
index 1a0c92d..107b50a 100755
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/PRClientServerTestBase.java
@@ -17,11 +17,9 @@
 import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;
 import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPort;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatNoException;
 
-import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -46,6 +44,7 @@
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.cache.Scope;
 import org.apache.geode.cache.client.Pool;
+import org.apache.geode.cache.client.PoolFactory;
 import org.apache.geode.cache.client.PoolManager;
 import org.apache.geode.cache.client.internal.PoolImpl;
 import org.apache.geode.cache.execute.Function;
@@ -57,8 +56,6 @@
 import org.apache.geode.internal.cache.functions.TestFunction;
 import org.apache.geode.internal.cache.tier.sockets.CacheServerTestUtil;
 import org.apache.geode.logging.internal.log4j.api.LogService;
-import org.apache.geode.test.dunit.Assert;
-import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.NetworkUtils;
 import org.apache.geode.test.dunit.SerializableCallableIF;
 import org.apache.geode.test.dunit.SerializableRunnable;
@@ -79,6 +76,8 @@
 
   protected static Cache cache = null;
 
+  protected String hostName;
+
   static String PartitionedRegionName = "TestPartitionedRegion"; // default name
 
   protected static String regionName = "TestRegion"; // default name
@@ -93,15 +92,15 @@
 
   @Override
   public final void postSetUp() throws Exception {
-    Host host = Host.getHost(0);
-    server1 = host.getVM(0);
-    server2 = host.getVM(1);
-    server3 = host.getVM(2);
-    client = host.getVM(3);
+    server1 = VM.getVM(0);
+    server2 = VM.getVM(1);
+    server3 = VM.getVM(2);
+    client = VM.getVM(3);
+    hostName = NetworkUtils.getServerHostName();
     postSetUpPRClientServerTestBase();
   }
 
-  protected void postSetUpPRClientServerTestBase() throws Exception {}
+  protected void postSetUpPRClientServerTestBase() {}
 
   private enum ExecuteFunctionMethod {
     ExecuteFunctionByObject, ExecuteFunctionById
@@ -119,7 +118,8 @@
     return ExecuteFunctionMethod.ExecuteFunctionByObject == functionExecutionType;
   }
 
-  ArrayList createCommonServerAttributes(String regionName, PartitionResolver pr, int red,
+  ArrayList<Object> createCommonServerAttributes(String regionName, PartitionResolver<?, ?> pr,
+      int red,
       String colocatedWithRegion) {
     ArrayList<Object> commonAttributes = new ArrayList<>();
     commonAttributes.add(regionName); // 0
@@ -130,123 +130,111 @@
     return commonAttributes;
   }
 
-  public static Integer createCacheServer(ArrayList commonAttributes, Integer localMaxMemory) {
-    AttributesFactory factory = new AttributesFactory();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+  public static Integer createCacheServer(ArrayList<Object> commonAttributes,
+      Integer localMaxMemory) {
+    return createCacheServer(commonAttributes, localMaxMemory, -1);
+  }
 
-    paf.setPartitionResolver((PartitionResolver) commonAttributes.get(1));
+  public static Integer createCacheServer(ArrayList<Object> commonAttributes,
+      Integer localMaxMemory, int maxThreads) {
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
+    PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
+
+    paf.setPartitionResolver((PartitionResolver<Object, Object>) commonAttributes.get(1));
     paf.setRedundantCopies((Integer) commonAttributes.get(2));
     paf.setTotalNumBuckets((Integer) commonAttributes.get(3));
     paf.setColocatedWith((String) commonAttributes.get(4));
     paf.setLocalMaxMemory(localMaxMemory);
-    PartitionAttributes partitionAttributes = paf.create();
+    PartitionAttributes<?, ?> partitionAttributes = paf.create();
     factory.setDataPolicy(DataPolicy.PARTITION);
     factory.setPartitionAttributes(partitionAttributes);
-    RegionAttributes attrs = factory.create();
+    RegionAttributes<Object, Object> attrs = factory.create();
 
-    Region region = cache.createRegion((String) commonAttributes.get(0), attrs);
-    assertNotNull(region);
+    Region<Object, Object> region = cache.createRegion((String) commonAttributes.get(0), attrs);
+    assertThat(region).isNotNull();
     CacheServer server1 = cache.addCacheServer();
-    assertNotNull(server1);
+    assertThat(server1).isNotNull();
     int port = getRandomAvailableTCPPort();
     server1.setPort(port);
-    try {
-      server1.start();
-    } catch (IOException e) {
-      Assert.fail("Failed to start the Server", e);
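+    // A non-positive maxThreads leaves the cache server's default thread settings in place.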
+    if (maxThreads > 0) {
+      server1.setMaxThreads(maxThreads);
     }
-    assertTrue(server1.isRunning());
+    assertThatNoException().isThrownBy(server1::start);
+    assertThat(server1.isRunning()).isTrue();
 
     return server1.getPort();
   }
 
-  private static Integer createSelectorCacheServer(ArrayList commonAttributes,
+  private static Integer createSelectorCacheServer(ArrayList<Object> commonAttributes,
       Integer localMaxMemory) throws Exception {
-    AttributesFactory factory = new AttributesFactory();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
+    PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
 
-    paf.setPartitionResolver((PartitionResolver) commonAttributes.get(1));
+    paf.setPartitionResolver((PartitionResolver<Object, Object>) commonAttributes.get(1));
     paf.setRedundantCopies((Integer) commonAttributes.get(2));
     paf.setTotalNumBuckets((Integer) commonAttributes.get(3));
     paf.setColocatedWith((String) commonAttributes.get(4));
     paf.setLocalMaxMemory(localMaxMemory);
-    PartitionAttributes partitionAttributes = paf.create();
+    PartitionAttributes<?, ?> partitionAttributes = paf.create();
     factory.setDataPolicy(DataPolicy.PARTITION);
     factory.setPartitionAttributes(partitionAttributes);
-    RegionAttributes attrs = factory.create();
+    RegionAttributes<Object, Object> attrs = factory.create();
 
-    Region region = cache.createRegion((String) commonAttributes.get(0), attrs);
-    assertNotNull(region);
+    Region<Object, Object> region = cache.createRegion((String) commonAttributes.get(0), attrs);
+    assertThat(region).isNotNull();
     CacheServer server1 = cache.addCacheServer();
-    assertNotNull(server1);
+    assertThat(server1).isNotNull();
     int port = getRandomAvailableTCPPort();
     server1.setPort(port);
     server1.setMaxThreads(16);
     server1.start();
-    assertTrue(server1.isRunning());
+    assertThat(server1.isRunning()).isTrue();
 
     return server1.getPort();
   }
 
-  private static Integer createCacheServerWith2Regions(ArrayList commonAttributes,
+  private static Integer createCacheServerWith2Regions(ArrayList<Object> commonAttributes,
       Integer localMaxMemory) throws Exception {
-    AttributesFactory factory = new AttributesFactory();
-    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
+    PartitionAttributesFactory<Object, Object> paf = new PartitionAttributesFactory<>();
 
-    paf.setPartitionResolver((PartitionResolver) commonAttributes.get(1));
+    paf.setPartitionResolver((PartitionResolver<Object, Object>) commonAttributes.get(1));
     paf.setRedundantCopies((Integer) commonAttributes.get(2));
     paf.setTotalNumBuckets((Integer) commonAttributes.get(3));
     paf.setColocatedWith((String) commonAttributes.get(4));
     paf.setLocalMaxMemory(localMaxMemory);
-    PartitionAttributes partitionAttributes = paf.create();
+    PartitionAttributes<?, ?> partitionAttributes = paf.create();
     factory.setDataPolicy(DataPolicy.PARTITION);
     factory.setPartitionAttributes(partitionAttributes);
-    RegionAttributes attrs = factory.create();
+    RegionAttributes<Object, Object> attrs = factory.create();
 
-    Region region1 = cache.createRegion(PartitionedRegionName + "1", attrs);
-    assertNotNull(region1);
-    Region region2 = cache.createRegion(PartitionedRegionName + "2", attrs);
-    assertNotNull(region2);
+    Region<Object, Object> region1 = cache.createRegion(PartitionedRegionName + "1", attrs);
+    assertThat(region1).isNotNull();
+    Region<Object, Object> region2 = cache.createRegion(PartitionedRegionName + "2", attrs);
+    assertThat(region2).isNotNull();
     CacheServer server1 = cache.addCacheServer();
-    assertNotNull(server1);
+    assertThat(server1).isNotNull();
     int port = getRandomAvailableTCPPort();
     server1.setPort(port);
     server1.start();
-    assertTrue(server1.isRunning());
+    assertThat(server1.isRunning()).isTrue();
 
     return server1.getPort();
   }
 
   public static Integer createCacheServer() throws Exception {
     CacheServer server1 = cache.addCacheServer();
-    assertNotNull(server1);
+    assertThat(server1).isNotNull();
     int port = getRandomAvailableTCPPort();
     server1.setPort(port);
     server1.start();
-    assertTrue(server1.isRunning());
+    assertThat(server1.isRunning()).isTrue();
 
     return server1.getPort();
   }
 
-  private static Integer createCacheServerWithDR() throws Exception {
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    factory.setDataPolicy(DataPolicy.REPLICATE);
-    assertNotNull(cache);
-    Region region = cache.createRegion(regionName, factory.create());
-    assertNotNull(region);
-
-    CacheServer server1 = cache.addCacheServer();
-    assertNotNull(server1);
-    int port = getRandomAvailableTCPPort();
-    server1.setPort(port);
-    server1.start();
-    assertTrue(server1.isRunning());
-
-    return server1.getPort();
-  }
-
-  public static void createCacheClient(String host, Integer port1, Integer port2, Integer port3) {
+  public static void createCacheClient(String host, int port1, int port2, int port3) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
     Pool p;
 
@@ -260,16 +248,16 @@
       CacheServerTestUtil.enableShufflingOfEndpoints();
     }
     pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
     factory.setScope(Scope.LOCAL);
     factory.setDataPolicy(DataPolicy.EMPTY);
     factory.setPoolName(p.getName());
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(PartitionedRegionName, attrs);
-    assertNotNull(region);
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region = cache.createRegion(PartitionedRegionName, attrs);
+    assertThat(region).isNotNull();
   }
 
-  private static void createCacheClient_SingleConnection(String host, Integer port1) {
+  private static void createCacheClient_SingleConnection(String host, int port1) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
     Pool p;
 
@@ -282,17 +270,17 @@
       CacheServerTestUtil.enableShufflingOfEndpoints();
     }
     pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
     factory.setScope(Scope.LOCAL);
     factory.setDataPolicy(DataPolicy.EMPTY);
     factory.setPoolName(p.getName());
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(PartitionedRegionName, attrs);
-    assertNotNull(region);
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region = cache.createRegion(PartitionedRegionName, attrs);
+    assertThat(region).isNotNull();
   }
 
-  private static void createCacheClientWith2Regions(String host, Integer port1, Integer port2,
-      Integer port3) {
+  private static void createCacheClientWith2Regions(String host, int port1, int port2,
+      int port3) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
     Pool p;
 
@@ -306,22 +294,22 @@
       CacheServerTestUtil.enableShufflingOfEndpoints();
     }
     pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
     factory.setDataPolicy(DataPolicy.EMPTY);
     factory.setPoolName(p.getName());
-    RegionAttributes attrs = factory.create();
-    Region region1 = cache.createRegion(PartitionedRegionName + "1", attrs);
-    assertNotNull(region1);
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region1 = cache.createRegion(PartitionedRegionName + "1", attrs);
+    assertThat(region1).isNotNull();
 
-    factory = new AttributesFactory();
+    factory = new AttributesFactory<>();
     factory.setDataPolicy(DataPolicy.EMPTY);
     attrs = factory.create();
-    Region region2 = cache.createRegion(PartitionedRegionName + "2", attrs);
-    assertNotNull(region2);
+    Region<Object, Object> region2 = cache.createRegion(PartitionedRegionName + "2", attrs);
+    assertThat(region2).isNotNull();
   }
 
-  private static void createSingleHopCacheClient(String host, Integer port1, Integer port2,
-      Integer port3) {
+  private static void createSingleHopCacheClient(String host, int port1, int port2,
+      int port3) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
 
     Pool p;
@@ -335,17 +323,17 @@
       CacheServerTestUtil.enableShufflingOfEndpoints();
     }
     pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
     factory.setScope(Scope.LOCAL);
     factory.setDataPolicy(DataPolicy.EMPTY);
     factory.setPoolName(p.getName());
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(PartitionedRegionName, attrs);
-    assertNotNull(region);
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region = cache.createRegion(PartitionedRegionName, attrs);
+    assertThat(region).isNotNull();
   }
 
-  private static void createNoSingleHopCacheClient(String host, Integer port1, Integer port2,
-      Integer port3) {
+  private static void createNoSingleHopCacheClient(String host, int port1, int port2,
+      int port3) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
 
     Pool p;
@@ -359,17 +347,44 @@
       CacheServerTestUtil.enableShufflingOfEndpoints();
     }
     pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
     factory.setScope(Scope.LOCAL);
     factory.setDataPolicy(DataPolicy.EMPTY);
     factory.setPoolName(p.getName());
-    RegionAttributes attrs = factory.create();
-    Region region = cache.createRegion(PartitionedRegionName, attrs);
-    assertNotNull(region);
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region = cache.createRegion(PartitionedRegionName, attrs);
+    assertThat(region).isNotNull();
   }
 
-  private static void createCacheClientWithoutRegion(String host, Integer port1, Integer port2,
-      Integer port3) {
+  private static void createNoSingleHopCacheClient(String host,
+      int port1, int port2, int port3, int connectTimeout) {
+    CacheServerTestUtil.disableShufflingOfEndpoints();
+
+    Pool p;
+    try {
+      PoolFactory factory = PoolManager.createFactory().addServer(host, port1)
+          .addServer(host, port2).addServer(host, port3).setPingInterval(2000)
+          .setSubscriptionEnabled(true).setReadTimeout(2000)
+          .setSocketBufferSize(1000).setRetryAttempts(0)
+          .setSocketConnectTimeout(connectTimeout)
+          .setPRSingleHopEnabled(false);
+
+      p = factory.create("PRClientServerTestBase");
+    } finally {
+      CacheServerTestUtil.enableShufflingOfEndpoints();
+    }
+    pool = (PoolImpl) p;
+    AttributesFactory<Object, Object> factory = new AttributesFactory<>();
+    factory.setScope(Scope.LOCAL);
+    factory.setDataPolicy(DataPolicy.EMPTY);
+    factory.setPoolName(p.getName());
+    RegionAttributes<Object, Object> attrs = factory.create();
+    Region<Object, Object> region = cache.createRegion(PartitionedRegionName, attrs);
+    assertThat(region).isNotNull();
+  }
+
+  private static void createCacheClientWithoutRegion(String host, int port1, int port2,
+      int port3) {
     CacheServerTestUtil.disableShufflingOfEndpoints();
     logger
         .info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
@@ -387,81 +402,56 @@
     pool = (PoolImpl) p;
   }
 
-  private static void createCacheClientWithDistributedRegion(String host, Integer port1,
-      Integer port2, Integer port3) throws Exception {
-    CacheServerTestUtil.disableShufflingOfEndpoints();
-    logger
-        .info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
-
-    Pool p;
-    try {
-      p = PoolManager.createFactory().addServer(host, port1)
-          .addServer(host, port2).addServer(host, port3).setPingInterval(250)
-          .setSubscriptionEnabled(true).setSubscriptionRedundancy(-1).setReadTimeout(2000)
-          .setSocketBufferSize(1000).setMinConnections(6).setMaxConnections(10).setRetryAttempts(0)
-          .create("PRClientServerTestBaseWithoutRegion");
-    } finally {
-      CacheServerTestUtil.enableShufflingOfEndpoints();
-    }
-    pool = (PoolImpl) p;
-    AttributesFactory factory = new AttributesFactory();
-    factory.setScope(Scope.DISTRIBUTED_ACK);
-    factory.setDataPolicy(DataPolicy.REPLICATE);
-    assertNotNull(cache);
-    Region region = cache.createRegion(regionName, factory.create());
-    assertNotNull(region);
-  }
-
-  void createClientServerScenarion(ArrayList commonAttributes, int localMaxMemoryServer1,
+  void createClientServerScenarion(ArrayList<Object> commonAttributes, int localMaxMemoryServer1,
       int localMaxMemoryServer2, int localMaxMemoryServer3) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer1));
-    Integer port2 = server2.invoke(() -> PRClientServerTestBase
+    int port2 = server2.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer2));
-    Integer port3 = server3.invoke(() -> PRClientServerTestBase
+    int port3 = server3.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer3));
     client.invoke(() -> PRClientServerTestBase
-        .createCacheClient(NetworkUtils.getServerHostName(server1.getHost()), port1, port2, port3));
+        .createCacheClient(hostName, port1, port2, port3));
   }
 
-  void createClientServerScenarion_SingleConnection(ArrayList commonAttributes,
+  void createClientServerScenarion_SingleConnection(ArrayList<Object> commonAttributes,
       int localMaxMemoryServer1,
       int localMaxMemoryServer2) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer1));
     server2.invoke(() -> PRClientServerTestBase.createCacheServer(commonAttributes,
         localMaxMemoryServer2));
     client.invoke(() -> PRClientServerTestBase.createCacheClient_SingleConnection(
-        NetworkUtils.getServerHostName(server1.getHost()), port1));
+        hostName, port1));
   }
 
 
 
-  void createClientServerScenarionWith2Regions(ArrayList commonAttributes,
+  void createClientServerScenarionWith2Regions(ArrayList<Object> commonAttributes,
       int localMaxMemoryServer1, int localMaxMemoryServer2,
       int localMaxMemoryServer3) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createCacheServerWith2Regions(commonAttributes, localMaxMemoryServer1));
-    Integer port2 = server2.invoke(() -> PRClientServerTestBase
+    int port2 = server2.invoke(() -> PRClientServerTestBase
         .createCacheServerWith2Regions(commonAttributes, localMaxMemoryServer2));
-    Integer port3 = server3.invoke(() -> PRClientServerTestBase
+    int port3 = server3.invoke(() -> PRClientServerTestBase
         .createCacheServerWith2Regions(commonAttributes, localMaxMemoryServer3));
     client.invoke(() -> PRClientServerTestBase.createCacheClientWith2Regions(
-        NetworkUtils.getServerHostName(server1.getHost()), port1, port2, port3));
+        hostName, port1, port2, port3));
   }
 
-  void createClientServerScenarioSingleHop(ArrayList commonAttributes,
+  void createClientServerScenarioSingleHop(ArrayList<Object> commonAttributes,
       int localMaxMemoryServer1, int localMaxMemoryServer2,
       int localMaxMemoryServer3) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer1));
-    Integer port2 = server2.invoke(() -> PRClientServerTestBase
+    int port2 = server2.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer2));
-    Integer port3 = server3.invoke(() -> PRClientServerTestBase
+    int port3 = server3.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer3));
     // Workaround for the issue that hostnames returned by the client metadata may
     // not match those configured by the pool, leading to multiple copies
@@ -475,33 +465,49 @@
     return cache.getDistributedSystem().getDistributedMember().getHost();
   }
 
-  void createClientServerScenarioNoSingleHop(ArrayList commonAttributes,
+  void createClientServerScenarioNoSingleHop(ArrayList<Object> commonAttributes,
       int localMaxMemoryServer1, int localMaxMemoryServer2,
       int localMaxMemoryServer3) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer1));
-    Integer port2 = server2.invoke(() -> PRClientServerTestBase
+    int port2 = server2.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer2));
-    Integer port3 = server3.invoke(() -> PRClientServerTestBase
+    int port3 = server3.invoke(() -> PRClientServerTestBase
         .createCacheServer(commonAttributes, localMaxMemoryServer3));
     client.invoke(() -> PRClientServerTestBase.createNoSingleHopCacheClient(
-        NetworkUtils.getServerHostName(server1.getHost()), port1, port2, port3));
+        hostName, port1, port2, port3));
   }
 
-  void createClientServerScenarioSelectorNoSingleHop(ArrayList commonAttributes,
+  void createClientServerScenarioNoSingleHop(ArrayList<Object> commonAttributes,
+      int localMaxMemoryServer1, int localMaxMemoryServer2,
+      int localMaxMemoryServer3,
+      int maxThreads,
+      int connectTimeout) {
+    createCacheInClientServer();
+    int port1 = server1.invoke(() -> PRClientServerTestBase
+        .createCacheServer(commonAttributes, localMaxMemoryServer1, maxThreads));
+    int port2 = server2.invoke(() -> PRClientServerTestBase
+        .createCacheServer(commonAttributes, localMaxMemoryServer2, maxThreads));
+    int port3 = server3.invoke(() -> PRClientServerTestBase
+        .createCacheServer(commonAttributes, localMaxMemoryServer3, maxThreads));
+    client.invoke(() -> PRClientServerTestBase.createNoSingleHopCacheClient(
+        hostName, port1, port2, port3, connectTimeout));
+  }
+
+  void createClientServerScenarioSelectorNoSingleHop(ArrayList<Object> commonAttributes,
       int localMaxMemoryServer1,
       int localMaxMemoryServer2,
       int localMaxMemoryServer3) {
     createCacheInClientServer();
-    Integer port1 = server1.invoke(() -> PRClientServerTestBase
+    int port1 = server1.invoke(() -> PRClientServerTestBase
         .createSelectorCacheServer(commonAttributes, localMaxMemoryServer1));
-    Integer port2 = server2.invoke(() -> PRClientServerTestBase
+    int port2 = server2.invoke(() -> PRClientServerTestBase
         .createSelectorCacheServer(commonAttributes, localMaxMemoryServer2));
-    Integer port3 = server3.invoke(() -> PRClientServerTestBase
+    int port3 = server3.invoke(() -> PRClientServerTestBase
         .createSelectorCacheServer(commonAttributes, localMaxMemoryServer3));
     client.invoke(() -> PRClientServerTestBase.createNoSingleHopCacheClient(
-        NetworkUtils.getServerHostName(server1.getHost()), port1, port2, port3));
+        hostName, port1, port2, port3));
   }
 
 
@@ -509,15 +515,15 @@
     logger.info(
         "PRClientServerTestBase#createClientServerScenarionWithoutRegion : creating client server");
     createCacheInClientServer();
-    Integer port1 = server1.invoke(
+    int port1 = server1.invoke(
         (SerializableCallableIF<Integer>) PRClientServerTestBase::createCacheServer);
-    Integer port2 = server2.invoke(
+    int port2 = server2.invoke(
         (SerializableCallableIF<Integer>) PRClientServerTestBase::createCacheServer);
-    Integer port3 = server3.invoke(
+    int port3 = server3.invoke(
         (SerializableCallableIF<Integer>) PRClientServerTestBase::createCacheServer);
 
     client.invoke(() -> PRClientServerTestBase.createCacheClientWithoutRegion(
-        NetworkUtils.getServerHostName(server1.getHost()), port1, port2, port3));
+        hostName, port1, port2, port3));
   }
 
   void runOnAllServers(SerializableRunnable runnable) {
@@ -526,7 +532,7 @@
     server3.invoke(runnable);
   }
 
-  void registerFunctionAtServer(Function function) {
+  void registerFunctionAtServer(Function<?> function) {
     server1.invoke(PRClientServerTestBase.class, "registerFunction", new Object[] {function});
 
     server2.invoke(PRClientServerTestBase.class, "registerFunction", new Object[] {function});
@@ -534,7 +540,7 @@
     server3.invoke(PRClientServerTestBase.class, "registerFunction", new Object[] {function});
   }
 
-  public static void registerFunction(Function function) {
+  public static void registerFunction(Function<?> function) {
     FunctionService.registerFunction(function);
   }
 
@@ -559,24 +565,20 @@
   }
 
   private void createCache(Properties props) {
-    try {
-      DistributedSystem ds = getSystem(props);
-      assertNotNull(ds);
-      ds.disconnect();
-      ds = getSystem(props);
-      cache = CacheFactory.create(ds);
-      assertNotNull(cache);
-    } catch (Exception e) {
-      Assert.fail("Failed while creating the cache", e);
-    }
+    DistributedSystem ds = getSystem(props);
+    assertThat(ds).isNotNull();
+    ds.disconnect();
+    ds = getSystem(props);
+    cache = CacheFactory.create(ds);
+    assertThat(cache).isNotNull();
   }
 
   static void startServerHA() throws Exception {
     Wait.pause(2000);
-    Collection bridgeServers = cache.getCacheServers();
+    Collection<?> bridgeServers = cache.getCacheServers();
     logger
         .info("Start Server cache servers list : " + bridgeServers.size());
-    Iterator bridgeIterator = bridgeServers.iterator();
+    Iterator<?> bridgeIterator = bridgeServers.iterator();
     CacheServer bridgeServer = (CacheServer) bridgeIterator.next();
     logger.info("start Server cache server" + bridgeServer);
     bridgeServer.start();
@@ -584,7 +586,7 @@
 
   static void stopServerHA() {
     Wait.pause(1000);
-    Iterator iter = cache.getCacheServers().iterator();
+    Iterator<?> iter = cache.getCacheServers().iterator();
     if (iter.hasNext()) {
       CacheServer server = (CacheServer) iter.next();
       server.stop();
@@ -616,13 +618,13 @@
 
   void serverBucketFilterExecution(Set<Integer> bucketFilterSet) {
     Region<Integer, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
+    assertThat(region).isNotNull();
     final HashSet<Integer> testKeysSet = new HashSet<>();
     for (int i = 150; i > 0; i--) {
       testKeysSet.add(i);
     }
     DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
+    Function<?> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
     if (shouldRegisterFunctionsOnClient()) {
       FunctionService.registerFunction(function);
     }
@@ -636,24 +638,24 @@
     ResultCollector<Integer, List<Integer>> rc =
         dataSet.withBucketFilter(bucketFilterSet).execute(function.getId());
     List<Integer> results = rc.getResult();
-    assertEquals(bucketFilterSet.size(), results.size());
+    assertThat(results.size()).isEqualTo(bucketFilterSet.size());
     for (Integer bucket : results) {
       bucketFilterSet.remove(bucket);
     }
-    assertTrue(bucketFilterSet.isEmpty());
+    assertThat(bucketFilterSet).isEmpty();
   }
 
   void serverBucketFilterOverrideExecution(Set<Integer> bucketFilterSet,
       Set<Integer> keyFilterSet) {
 
     Region<Integer, Integer> region = cache.getRegion(PartitionedRegionName);
-    assertNotNull(region);
+    assertThat(region).isNotNull();
     final HashSet<Integer> testKeysSet = new HashSet<>();
     for (int i = 150; i > 0; i--) {
       testKeysSet.add(i);
     }
     DistributedSystem.setThreadsSocketPolicy(false);
-    Function function = new TestFunction(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
+    Function<?> function = new TestFunction<>(true, TestFunction.TEST_FUNCTION_BUCKET_FILTER);
     if (shouldRegisterFunctionsOnClient()) {
       FunctionService.registerFunction(function);
     }
@@ -671,20 +673,21 @@
     ResultCollector<Integer, List<Integer>> rc = dataSet.withBucketFilter(bucketFilterSet)
         .withFilter(keyFilterSet).execute(function.getId());
     List<Integer> results = rc.getResult();
-    assertEquals(expectedBucketSet.size(), results.size());
+    assertThat(results.size()).isEqualTo(expectedBucketSet.size());
     for (Integer bucket : results) {
       expectedBucketSet.remove(bucket);
     }
-    assertTrue(expectedBucketSet.isEmpty());
+    assertThat(expectedBucketSet).isEmpty();
   }
 
-  public static class BucketFilterPRResolver implements PartitionResolver, Serializable {
+  public static class BucketFilterPRResolver
+      implements PartitionResolver<Object, Object>, Serializable {
 
     @Override
     public void close() {}
 
     @Override
-    public Object getRoutingObject(EntryOperation opDetails) {
+    public Object getRoutingObject(EntryOperation<Object, Object> opDetails) {
       Object key = opDetails.getKey();
       return getBucketID(key);
     }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDistributedTest.java
similarity index 78%
rename from geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
rename to geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDistributedTest.java
index 0b99a14..0557807 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationDistributedTest.java
@@ -20,6 +20,7 @@
 import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
 import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPort;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
@@ -50,6 +51,8 @@
 import org.apache.geode.cache.util.CacheListenerAdapter;
 import org.apache.geode.cache30.CacheSerializableRunnable;
 import org.apache.geode.distributed.internal.ServerLocationAndMemberId;
+import org.apache.geode.distributed.internal.membership.api.MembershipManagerHelper;
+import org.apache.geode.test.dunit.AsyncInvocation;
 import org.apache.geode.test.dunit.Host;
 import org.apache.geode.test.dunit.IgnoredException;
 import org.apache.geode.test.dunit.NetworkUtils;
@@ -68,53 +71,89 @@
  * the same across servers
  */
 @Category({ClientSubscriptionTest.class})
-public class UpdatePropagationDUnitTest extends JUnit4CacheTestCase {
+public class UpdatePropagationDistributedTest extends JUnit4CacheTestCase {
 
   private static final String REGION_NAME = "UpdatePropagationDUnitTest_region";
 
   private VM server1 = null;
   private VM server2 = null;
+  private VM server3 = null;
   private VM client1 = null;
   private VM client2 = null;
 
   private int PORT1;
   private int PORT2;
+  private int PORT3;
+
+  private final int minNumEntries = 2;
+
+  private String hostnameServer1;
+  private String hostnameServer3;
 
   @Override
   public final void postSetUp() throws Exception {
     disconnectAllFromDS();
 
     final Host host = Host.getHost(0);
-    // Server1 VM
+
     server1 = host.getVM(0);
 
-    // Server2 VM
     server2 = host.getVM(1);
 
-    // Client 1 VM
-    client1 = host.getVM(2);
+    server3 = host.getVM(2);
 
-    // client 2 VM
-    client2 = host.getVM(3);
+    client1 = host.getVM(3);
 
-    PORT1 = server1.invoke(this::createServerCache);
-    PORT2 = server2.invoke(this::createServerCache);
+    client2 = host.getVM(4);
 
-    client1.invoke(
-        () -> createClientCache(NetworkUtils.getServerHostName(server1.getHost()), PORT1, PORT2));
-    client2.invoke(
-        () -> createClientCache(NetworkUtils.getServerHostName(server1.getHost()), PORT1, PORT2));
+    PORT1 = server1.invoke(() -> createServerCache());
+    PORT2 = server2.invoke(() -> createServerCache());
+    PORT3 = server3.invoke(() -> createServerCache());
+
+    hostnameServer1 = NetworkUtils.getServerHostName(server1.getHost());
+    hostnameServer3 = NetworkUtils.getServerHostName(server3.getHost());
 
     IgnoredException.addIgnoredException("java.net.SocketException");
     IgnoredException.addIgnoredException("Unexpected IOException");
   }
 
+
+
+  @Test
+  public void updatesArePropagatedToAllMembersWhenOneKilled() throws Exception {
+    client1.invoke(
+        () -> createClientCache(hostnameServer1, PORT1));
+    client2.invoke(
+        () -> createClientCache(hostnameServer3, PORT3));
+    int entries = 20;
+    AsyncInvocation invocation = client1.invokeAsync(() -> doPuts(entries));
+
+    // Wait for some entries to be put
+    server1.invoke(this::verifyMinEntriesInserted);
+
+    // Simulate crash
+    server2.invoke(() -> {
+      MembershipManagerHelper.crashDistributedSystem(getSystemStatic());
+    });
+
+    invocation.await();
+
+    int notNullEntriesIn1 = client1.invoke(() -> getNotNullEntriesNumber(entries));
+    int notNullEntriesIn3 = client2.invoke(() -> getNotNullEntriesNumber(entries));
+    assertThat(notNullEntriesIn3).isEqualTo(notNullEntriesIn1);
+  }
+
   /**
    * This tests whether updates are received by the other clients when an
    * interest-list failover occurs
    */
   @Test
   public void updatesArePropagatedAfterFailover() {
+    client1.invoke(
+        () -> createClientCache(hostnameServer1, PORT1, PORT2));
+    client2.invoke(
+        () -> createClientCache(hostnameServer1, PORT1, PORT2));
+
     // First create entries on both servers via the two client
     client1.invoke(this::createEntriesK1andK2);
     client2.invoke(this::createEntriesK1andK2);
@@ -248,6 +287,18 @@
         .addCacheListener(new EventTrackingCacheListener()).create(REGION_NAME);
   }
 
+  private void createClientCache(String host, Integer port1) {
+    Properties props = new Properties();
+    props.setProperty(LOCATORS, "");
+    ClientCacheFactory cf = new ClientCacheFactory(props);
+    cf.addPoolServer(host, port1).setPoolSubscriptionEnabled(false)
+        .setPoolSubscriptionRedundancy(-1).setPoolMinConnections(4).setPoolSocketBufferSize(1000)
+        .setPoolReadTimeout(100).setPoolPingInterval(300);
+    ClientCache cache = getClientCache(cf);
+    cache.createClientRegionFactory(ClientRegionShortcut.PROXY)
+        .create(REGION_NAME);
+  }
+
   private Integer createServerCache() throws Exception {
     Cache cache = getCache();
     RegionAttributes attrs = createCacheServerAttributes();
@@ -305,6 +356,36 @@
     });
   }
 
+  private void verifyMinEntriesInserted() {
+    await().untilAsserted(() -> assertThat(getCache().getRegion(SEPARATOR + REGION_NAME))
+        .hasSizeGreaterThan(minNumEntries));
+  }
+
+  private void doPuts(int entries) throws Exception {
+    Region<String, String> r1 = getCache().getRegion(REGION_NAME);
+    assertThat(r1).isNotNull();
+    for (int i = 0; i < entries; i++) {
+      try {
+        r1.put("" + i, "" + i);
+      } catch (Exception e) {
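+        // intentionally ignore put failures while a server is down; the test
+        // only counts the entries that actually arrived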
+      }
+      Thread.sleep(1000);
+    }
+  }
+
+  private int getNotNullEntriesNumber(int entries) {
+    int notNullEntries = 0;
+    Region<String, String> r1 = getCache().getRegion(SEPARATOR + REGION_NAME);
+    assertThat(r1).isNotNull();
+    for (int i = 0; i < entries; i++) {
+      Object value = r1.get("" + i, "" + i);
+      if (value != null) {
+        notNullEntries++;
+      }
+    }
+    return notNullEntries;
+  }
+
   private static class EventTrackingCacheListener extends CacheListenerAdapter {
 
     List<EntryEvent> receivedEvents = new ArrayList<>();
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDistributedTest.java
similarity index 93%
rename from geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDUnitTest.java
rename to geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDistributedTest.java
index 47721ce..77d903e 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/tier/sockets/UpdatePropagationPRDistributedTest.java
@@ -21,7 +21,7 @@
 /**
  * subclass of UpdatePropagationDUnitTest to exercise partitioned regions
  */
-public class UpdatePropagationPRDUnitTest extends UpdatePropagationDUnitTest {
+public class UpdatePropagationPRDistributedTest extends UpdatePropagationDistributedTest {
 
   @Override
   protected RegionAttributes createCacheServerAttributes() {
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/versions/TombstoneDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/versions/TombstoneDUnitTest.java
index bbcf0ca..d4bd0cc 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/versions/TombstoneDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/versions/TombstoneDUnitTest.java
@@ -14,11 +14,13 @@
  */
 package org.apache.geode.internal.cache.versions;
 
+import static org.apache.geode.cache.RegionShortcut.PARTITION_PERSISTENT;
 import static org.apache.geode.cache.RegionShortcut.REPLICATE;
 import static org.apache.geode.cache.RegionShortcut.REPLICATE_PERSISTENT;
 import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPort;
 import static org.apache.geode.internal.cache.InitialImageOperation.GIITestHookType.DuringApplyDelta;
 import static org.apache.geode.internal.cache.InitialImageOperation.resetAllGIITestHooks;
+import static org.apache.geode.internal.cache.TombstoneService.EXPIRED_TOMBSTONE_LIMIT;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
@@ -121,6 +123,35 @@
     });
   }
 
+  @Test
+  public void testTombstoneExpiredAndNonExpiredAreClearedAfterRegionIsDestroyed() {
+    VM vm0 = VM.getVM(0);
+
+    vm0.invoke(() -> {
+      // reduce timeout so that tombstone is immediately marked as expired
+      TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 100;
+      createCacheAndRegion(PARTITION_PERSISTENT);
+      region.put("K1", "V1");
+      region.destroy("K1");
+    });
+
+    vm0.invoke(() -> {
+      waitForScheduledTombstoneCount(0);
+      // increase timeout so that next tombstone doesn't expire
+      TombstoneService.REPLICATE_TOMBSTONE_TIMEOUT = 150000;
+      region.put("K1", "V1");
+      region.destroy("K1");
+
+      region.destroyRegion();
+      // force expiry of batch - there is only one expired tombstone at this moment
+      EXPIRED_TOMBSTONE_LIMIT = 1;
+    });
+
+    vm0.invoke(() -> {
+      createCacheAndRegion(PARTITION_PERSISTENT);
+      checkExpiredTombstones(0);
+    });
+  }
 
   @Test
   public void testWhenAnOutOfRangeTimeStampIsSeenWeExpireItInReplicateTombstoneSweeper() {
@@ -562,6 +593,17 @@
     }
   }
 
+  private void waitForScheduledTombstoneCount(int count) {
+    LocalRegion region = (LocalRegion) cache.getRegion(REGION_NAME);
+    await().until(() -> ((InternalCache) cache).getTombstoneService().getSweeper(region).tombstones
+        .size() == count);
+  }
+
+  private void checkExpiredTombstones(int count) {
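+    // awaits until the tombstone service reports exactly 'count' scheduled tombstones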
+    await().until(
+        () -> ((InternalCache) cache).getTombstoneService().getScheduledTombstoneCount() == count);
+  }
+
   private void performGC(int count) throws Exception {
     ((InternalCache) cache).getTombstoneService().forceBatchExpirationForTests(count);
   }
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
index cdb5432..5aeba3f 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
@@ -110,7 +110,7 @@
       InternalDistributedSystem distributedSystem = getCache().getInternalDistributedSystem();
       InternalDistributedMember otherMember = distributedSystem.getDistributionManager()
           .getOtherNormalDistributionManagerIds().iterator().next();
-      Connection connection = conTable.getConduit().getConnection(otherMember, true, false,
+      Connection connection = conTable.getConduit().getConnection(otherMember, true,
           System.currentTimeMillis(), 15000, 0);
       await().untilAsserted(() -> {
         // grab the shared, ordered "sender" connection to vm0. It should have a residual
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/TCPConduitDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/TCPConduitDUnitTest.java
index 41d64c6..794d6e0 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/TCPConduitDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/TCPConduitDUnitTest.java
@@ -110,7 +110,7 @@
     assertThat(connectionTable.hasReceiversFor(otherMember)).isTrue();
 
     Connection sharedUnordered = connectionTable.get(otherMember, false,
-        System.currentTimeMillis(), 15000, 0);
+        System.currentTimeMillis(), 15000, 0, false);
     sharedUnordered.requestClose("for testing");
     // the sender connection has been closed so we should only have 2 senders now
     assertThat(ConnectionTable.getNumSenderSharedConnections()).isEqualTo(2);
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterOperationExecutors.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterOperationExecutors.java
index ba25e3b..7c45bbb 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterOperationExecutors.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterOperationExecutors.java
@@ -25,12 +25,14 @@
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelException;
 import org.apache.geode.InternalGemFireError;
 import org.apache.geode.SystemFailure;
+import org.apache.geode.annotations.internal.MutableForTesting;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.distributed.internal.membership.gms.messages.ViewAckMessage;
 import org.apache.geode.internal.logging.CoreLoggingExecutors;
@@ -167,6 +169,8 @@
 
   private SerialQueuedExecutorPool serialQueuedExecutorPool;
 
+  @MutableForTesting
+  public static final AtomicInteger maxPrThreadsForTest = new AtomicInteger(-1);
 
   ClusterOperationExecutors(DistributionStats stats,
       InternalDistributedSystem system) {
@@ -252,10 +256,11 @@
             this::doWaitingThread, stats.getWaitingPoolHelper(),
             threadMonitor);
 
-    if (MAX_PR_THREADS > 1) {
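+    // test hook: a positive maxPrThreadsForTest overrides MAX_PR_THREADS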
+    int maxPrThreads = maxPrThreadsForTest.get() > 0 ? maxPrThreadsForTest.get() : MAX_PR_THREADS;
+    if (maxPrThreads > 1) {
       partitionedRegionPool =
           CoreLoggingExecutors.newThreadPoolWithFeedStatistics(
-              MAX_PR_THREADS, INCOMING_QUEUE_LIMIT, stats.getPartitionedRegionQueueHelper(),
+              maxPrThreads, INCOMING_QUEUE_LIMIT, stats.getPartitionedRegionQueueHelper(),
               "PartitionedRegion Message Processor",
               thread -> stats.incPartitionedRegionThreadStarts(), this::doPartitionRegionThread,
               stats.getPartitionedRegionPoolHelper(),
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
index a8a7bb8..eaac79f 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
@@ -281,11 +281,17 @@
           directReply = false;
         }
         if (ce != null) {
-          if (failedCe != null) {
-            failedCe.getMembers().addAll(ce.getMembers());
-            failedCe.getCauses().addAll(ce.getCauses());
+
+          if (!retry) {
+            retryInfo = ce;
           } else {
-            failedCe = ce;
+
+            if (failedCe != null) {
+              failedCe.getMembers().addAll(ce.getMembers());
+              failedCe.getCauses().addAll(ce.getCauses());
+            } else {
+              failedCe = ce;
+            }
           }
           ce = null;
         }
@@ -293,6 +299,9 @@
           if (failedCe != null) {
             throw failedCe;
           }
+          if (retryInfo != null) {
+            continue;
+          }
           return bytesWritten;
         }
 
@@ -338,7 +347,12 @@
         }
 
         if (ce != null) {
-          retryInfo = ce;
+          if (retryInfo != null) {
+            retryInfo.getMembers().addAll(ce.getMembers());
+            retryInfo.getCauses().addAll(ce.getCauses());
+          } else {
+            retryInfo = ce;
+          }
           ce = null;
         }
 
@@ -423,13 +437,13 @@
    * @param retry whether this is a retransmission
    * @param ackTimeout the ack warning timeout
    * @param ackSDTimeout the ack severe alert timeout
-   * @param cons a list to hold the connections
+   * @param connectionsList a list to hold the connections
    * @return null if everything went okay, or a ConnectExceptions object if some connections
    *         couldn't be obtained
    */
   private ConnectExceptions getConnections(Membership mgr, DistributionMessage msg,
       InternalDistributedMember[] destinations, boolean preserveOrder, boolean retry,
-      long ackTimeout, long ackSDTimeout, List cons) {
+      long ackTimeout, long ackSDTimeout, List<Connection> connectionsList) {
     ConnectExceptions ce = null;
     for (InternalDistributedMember destination : destinations) {
       if (destination == null) {
@@ -458,12 +472,18 @@
           if (ackTimeout > 0) {
             startTime = System.currentTimeMillis();
           }
-          Connection con = conduit.getConnection(destination, preserveOrder, retry, startTime,
-              ackTimeout, ackSDTimeout);
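+          // first attempts and retries now take distinct connection-lookup paths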
+          final Connection connection;
+          if (!retry) {
+            connection = conduit.getFirstScanForConnection(destination, preserveOrder, startTime,
+                ackTimeout, ackSDTimeout);
+          } else {
+            connection = conduit.getConnection(destination, preserveOrder, startTime,
+                ackTimeout, ackSDTimeout);
+          }
 
-          con.setInUse(true, startTime, 0, 0, null); // fix for bug#37657
-          cons.add(con);
-          if (con.isSharedResource() && msg instanceof DirectReplyMessage) {
+          connection.setInUse(true, startTime, 0, 0, null); // fix for bug#37657
+          connectionsList.add(connection);
+          if (connection.isSharedResource() && msg instanceof DirectReplyMessage) {
             DirectReplyMessage directMessage = (DirectReplyMessage) msg;
             directMessage.registerProcessor();
           }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
index 71be25a..1a77466 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
@@ -466,6 +466,10 @@
       List<Object> elementsMatching = new ArrayList<>();
       for (final Object key : eventSeqNumDeque) {
         Object object = optimalGet(key);
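+        // the value may have been removed concurrently; skip keys with no value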
+        if (object == null) {
+          continue;
+        }
+
         if (matchingPredicate.test(object)) {
           elementsMatching.add(object);
           eventSeqNumDeque.remove(key);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
index 9dee1c1..b544ca0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java
@@ -181,12 +181,23 @@
       GeodeGlossary.GEMFIRE_PREFIX + "disk.recoverValuesSync";
 
   /**
+   * When the configured threshold value is reached, the server overflows to a
+   * new hashmap during recovery of .drf files
+   */
+  public static final String DRF_HASHMAP_OVERFLOW_THRESHOLD_NAME =
+      GeodeGlossary.GEMFIRE_PREFIX + "disk.drfHashMapOverflowThreshold";
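+  // e.g. -Dgemfire.disk.drfHashMapOverflowThreshold=100000000 lowers the per-set limit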
+
+  /**
    * Allows recovering values for LRU regions. By default values are not recovered for LRU regions
    * during recovery.
    */
   public static final String RECOVER_LRU_VALUES_PROPERTY_NAME =
       GeodeGlossary.GEMFIRE_PREFIX + "disk.recoverLruValues";
 
+  static final long DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT = 805306368;
+  static final long DRF_HASHMAP_OVERFLOW_THRESHOLD =
+      Long.getLong(DRF_HASHMAP_OVERFLOW_THRESHOLD_NAME, DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT);
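+  // the default (805306368 = 768 * 1024 * 1024) caps the number of ids per open-hash set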
+
   boolean RECOVER_VALUES = getBoolean(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, true);
 
   boolean RECOVER_VALUES_SYNC = getBoolean(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, false);
@@ -3525,31 +3536,49 @@
       }
 
       try {
-        if (id > 0 && id <= 0x00000000FFFFFFFFL) {
-          currentInts.get().add((int) id);
+        if (shouldOverflow(id)) {
+          overflowToNewHashMap(id);
         } else {
-          currentLongs.get().add(id);
+          if (id > 0 && id <= 0x00000000FFFFFFFFL) {
+            currentInts.get().add((int) id);
+          } else {
+            currentLongs.get().add(id);
+          }
         }
       } catch (IllegalArgumentException illegalArgumentException) {
         // See GEODE-8029.
-        // Too many entries on the accumulated drf files, overflow and continue.
+        // Too many entries in the accumulated drf files; overflow to the next
+        // [Int|Long]OpenHashSet and continue.
+        overflowToNewHashMap(id);
+      }
+    }
+
+    boolean shouldOverflow(final long id) {
+      if (id > 0 && id <= 0x00000000FFFFFFFFL) {
+        return currentInts.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
+      } else {
+        return currentLongs.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
+      }
+    }
+
+    void overflowToNewHashMap(final long id) {
+      if (DRF_HASHMAP_OVERFLOW_THRESHOLD == DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT) {
         logger.warn(
             "There is a large number of deleted entries within the disk-store, please execute an offline compaction.");
+      }
 
-        // Overflow to the next [Int|Long]OpenHashSet and continue.
-        if (id > 0 && id <= 0x00000000FFFFFFFFL) {
-          IntOpenHashSet overflownHashSet = new IntOpenHashSet((int) INVALID_ID);
-          allInts.add(overflownHashSet);
-          currentInts.set(overflownHashSet);
+      if (id > 0 && id <= 0x00000000FFFFFFFFL) {
+        IntOpenHashSet overflownHashSet = new IntOpenHashSet((int) INVALID_ID);
+        allInts.add(overflownHashSet);
+        currentInts.set(overflownHashSet);
 
-          currentInts.get().add((int) id);
-        } else {
-          LongOpenHashSet overflownHashSet = new LongOpenHashSet((int) INVALID_ID);
-          allLongs.add(overflownHashSet);
-          currentLongs.set(overflownHashSet);
+        currentInts.get().add((int) id);
+      } else {
+        LongOpenHashSet overflownHashSet = new LongOpenHashSet((int) INVALID_ID);
+        allLongs.add(overflownHashSet);
+        currentLongs.set(overflownHashSet);
 
-          currentLongs.get().add(id);
-        }
+        currentLongs.get().add(id);
       }
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
index 16adbec..6a521be 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
@@ -340,8 +340,9 @@
     op = other.op;
     distributedMember = other.distributedMember;
     filterInfo = other.filterInfo;
-    keyInfo = other.keyInfo.isDistKeyInfo() ? new DistTxKeyInfo((DistTxKeyInfo) other.keyInfo)
-        : new KeyInfo(other.keyInfo);
+    keyInfo =
+        other.getKeyInfo().isDistKeyInfo() ? new DistTxKeyInfo((DistTxKeyInfo) other.getKeyInfo())
+            : new KeyInfo(other.getKeyInfo());
     if (other.getRawCallbackArgument() instanceof GatewaySenderEventCallbackArgument) {
       keyInfo.setCallbackArg((new GatewaySenderEventCallbackArgument(
           (GatewaySenderEventCallbackArgument) other.getRawCallbackArgument())));
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
index 242a3ff..dc3532a 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
@@ -926,7 +926,11 @@
      * @return true if predicate ever returned true
      */
     private boolean removeIf(Predicate<Tombstone> predicate) {
-      return removeUnexpiredIf(predicate) || removeExpiredIf(predicate);
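+      // evaluate both removals; '||' would short-circuit and skip removeExpiredIf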
+      boolean isTombstoneRemoved = removeUnexpiredIf(predicate);
+      if (removeExpiredIf(predicate)) {
+        isTombstoneRemoved = true;
+      }
+      return isTombstoneRemoved;
     }
 
     synchronized void start() {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java
index 69a0450..a63c8a1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java
@@ -16,6 +16,8 @@
 package org.apache.geode.internal.cache.execute;
 
 
+import java.util.function.BiFunction;
+
 import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.cache.execute.Function;
@@ -23,8 +25,10 @@
 import org.apache.geode.cache.execute.ResultCollector;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.internal.cache.ForceReattemptException;
 import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.execute.metrics.FunctionStats;
 import org.apache.geode.internal.cache.execute.metrics.FunctionStatsManager;
 import org.apache.geode.internal.cache.partitioned.PartitionedRegionFunctionStreamingMessage;
 import org.apache.geode.internal.serialization.KnownVersion;
@@ -43,7 +47,7 @@
 
   private static final Logger logger = LogService.getLogger();
 
-  PartitionedRegionFunctionStreamingMessage msg = null;
+  private final PartitionedRegionFunctionStreamingMessage msg;
 
   private final DistributionManager dm;
 
@@ -53,15 +57,15 @@
 
   private final boolean forwardExceptions;
 
-  private ResultCollector rc;
+  private final ResultCollector rc;
 
-  private ServerToClientFunctionResultSender serverSender;
+  private final ServerToClientFunctionResultSender serverSender;
 
   private boolean localLastResultReceived = false;
 
-  private boolean onlyLocal = false;
+  private final boolean onlyLocal;
 
-  private boolean onlyRemote = false;
+  private final boolean onlyRemote;
 
   private boolean completelyDoneFromRemote = false;
 
@@ -73,6 +77,7 @@
 
   private BucketMovedException bme;
 
+  private BiFunction<String, InternalDistributedSystem, FunctionStats> functionStatsFunctionProvider;
 
   public KnownVersion getClientVersion() {
     if (serverSender != null && serverSender.sc != null) { // is a client-server connection
@@ -81,41 +86,40 @@
     return null;
   }
 
-  /**
-   * Have to combine next two constructor in one and make a new class which will send Results back.
-   *
-   */
   public PartitionedRegionFunctionResultSender(DistributionManager dm, PartitionedRegion pr,
-      long time, PartitionedRegionFunctionStreamingMessage msg, Function function,
-      int[] bucketArray) {
-    this.msg = msg;
-    this.dm = dm;
-    this.pr = pr;
-    this.time = time;
-    this.function = function;
-    this.bucketArray = bucketArray;
-
-    forwardExceptions = false;
+      long time, PartitionedRegionFunctionStreamingMessage msg,
+      Function function, int[] bucketArray) {
+    this(dm, pr, time, null, null, false, false, false, function, bucketArray, msg,
+        (x, y) -> FunctionStatsManager.getFunctionStats((String) x, (InternalDistributedSystem) y));
   }
 
-  /**
-   * Have to combine next two constructor in one and make a new class which will send Results back.
-   *
-   */
   public PartitionedRegionFunctionResultSender(DistributionManager dm,
       PartitionedRegion partitionedRegion, long time, ResultCollector rc,
       ServerToClientFunctionResultSender sender, boolean onlyLocal, boolean onlyRemote,
       boolean forwardExceptions, Function function, int[] bucketArray) {
+    this(dm, partitionedRegion, time, rc, sender, onlyLocal, onlyRemote, forwardExceptions,
+        function, bucketArray, null,
+        (x, y) -> FunctionStatsManager.getFunctionStats((String) x, (InternalDistributedSystem) y));
+  }
+
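+  // package-private: lets tests inject a FunctionStats provider in place of FunctionStatsManager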
+  PartitionedRegionFunctionResultSender(DistributionManager dm,
+      PartitionedRegion partitionedRegion, long time, ResultCollector rc,
+      ServerToClientFunctionResultSender sender, boolean onlyLocal, boolean onlyRemote,
+      boolean forwardExceptions, Function function, int[] bucketArray,
+      PartitionedRegionFunctionStreamingMessage msg,
+      BiFunction functionStatsFunctionProvider) {
     this.dm = dm;
     pr = partitionedRegion;
     this.time = time;
     this.rc = rc;
+    this.msg = msg;
     serverSender = sender;
     this.onlyLocal = onlyLocal;
     this.onlyRemote = onlyRemote;
     this.forwardExceptions = forwardExceptions;
     this.function = function;
     this.bucketArray = bucketArray;
+    this.functionStatsFunctionProvider = functionStatsFunctionProvider;
   }
 
   private void checkForBucketMovement(Object oneResult) {
@@ -201,7 +205,7 @@
           // call a synchronized method as local node is also waiting to send lastResult
           lastResult(oneResult, rc, false, true, dm.getDistributionManagerId());
         }
-        FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+        functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
             .incResultsReceived();
       }
       // incrementing result sent stats.
@@ -210,7 +214,7 @@
       // time the stats for the result sent is again incremented : Once the PR team comes with the
       // concept of the Streaming FunctionOperation
       // for the partitioned Region then it will be simple to fix this problem.
-      FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+      functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
           .incResultsReturned();
     }
   }
@@ -319,14 +323,14 @@
       if (dm == null) {
         FunctionStatsManager.getFunctionStats(function.getId()).incResultsReceived();
       } else {
-        FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+        functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
             .incResultsReceived();
       }
     }
     if (dm == null) {
       FunctionStatsManager.getFunctionStats(function.getId()).incResultsReturned();
     } else {
-      FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+      functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
           .incResultsReturned();
     }
   }
@@ -360,21 +364,31 @@
             "PartitionedRegionFunctionResultSender adding result to ResultCollector on local node {}",
             oneResult);
         rc.addResult(dm.getDistributionManagerId(), oneResult);
-        FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+        functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
             .incResultsReceived();
       }
       // incrementing result sent stats.
-      FunctionStatsManager.getFunctionStats(function.getId(), dm.getSystem())
+      functionStatsFunctionProvider.apply(function.getId(), dm.getSystem())
           .incResultsReturned();
     }
   }
 
   private void clientSend(Object oneResult, DistributedMember memberID) {
-    serverSender.sendResult(oneResult, memberID);
+    try {
+      serverSender.sendResult(oneResult, memberID);
+    } catch (FunctionException e) {
+      logger.warn("Exception when sending result to client", e);
+      setException(e);
+    }
   }
 
   private void lastClientSend(DistributedMember memberID, Object lastResult) {
-    serverSender.lastResult(lastResult, memberID);
+    try {
+      serverSender.lastResult(lastResult, memberID);
+    } catch (FunctionException e) {
+      logger.warn("Exception when sending last result to client", e);
+      setException(e);
+    }
   }
 
   @Override
@@ -411,5 +425,4 @@
   public boolean isLastResultReceived() {
     return localLastResultReceived;
   }
-
 }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
index e5fa44a..73d85dd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySender.java
@@ -25,6 +25,7 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.logging.log4j.Logger;
@@ -238,6 +239,9 @@
 
   protected boolean enforceThreadsConnectSameReceiver;
 
+  @MutableForTesting
+  public static final AtomicBoolean doSleepForTestingInDistribute = new AtomicBoolean(false);
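+  // test-only hook: when true, distribute() sleeps 5 ms before taking the lifecycle lock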
+
   protected AbstractGatewaySender() {
     statisticsClock = disabledClock();
   }
@@ -1035,6 +1039,7 @@
       List<Integer> allRemoteDSIds, boolean isLastEventInTransaction) {
 
     final boolean isDebugEnabled = logger.isDebugEnabled();
+    boolean wasInterrupted = false;
 
     // released by this method or transfers ownership to TmpQueueEvent
     @Released
@@ -1122,16 +1127,17 @@
       }
 
       // If this gateway is not running, return
-      if (!isRunning()) {
-        if (isPrimary()) {
-          recordDroppedEvent(clonedEvent);
-        }
-        if (isDebugEnabled) {
-          logger.debug("Returning back without putting into the gateway sender queue:" + event);
-        }
+      if (!getIsRunningAndDropEventIfNotRunning(event, isDebugEnabled, clonedEvent)) {
         return;
       }
 
+      if (AbstractGatewaySender.doSleepForTestingInDistribute.get()) {
+        try {
+          Thread.sleep(5);
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
       if (!getLifeCycleLock().readLock().tryLock()) {
         synchronized (queuedEventsSync) {
           if (!enqueuedAllTempQueueEvents) {
@@ -1148,19 +1154,24 @@
           }
         }
         if (enqueuedAllTempQueueEvents) {
-          getLifeCycleLock().readLock().lock();
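+          // poll the lock with a timeout so a sender stopped in the meantime can drop the event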
+          while (true) {
+            try {
+              while (!getLifeCycleLock().readLock().tryLock(10, TimeUnit.MILLISECONDS)) {
+                if (!getIsRunningAndDropEventIfNotRunning(event, isDebugEnabled, clonedEvent)) {
+                  return;
+                }
+              }
+              break;
+            } catch (InterruptedException e) {
+              wasInterrupted = true;
+            }
+          }
         }
       }
       try {
         // If this gateway is not running, return
         // The sender may have stopped, after we have checked the status in the beginning.
-        if (!isRunning()) {
-          if (isDebugEnabled) {
-            logger.debug("Returning back without putting into the gateway sender queue:" + event);
-          }
-          if (isPrimary()) {
-            recordDroppedEvent(clonedEvent);
-          }
+        if (!getIsRunningAndDropEventIfNotRunning(event, isDebugEnabled, clonedEvent)) {
           return;
         }
 
@@ -1202,9 +1213,26 @@
       if (freeClonedEvent) {
         clonedEvent.release(); // fix for bug 48035
       }
+      if (wasInterrupted) {
+        Thread.currentThread().interrupt();
+      }
     }
   }
 
+  private boolean getIsRunningAndDropEventIfNotRunning(EntryEventImpl event, boolean isDebugEnabled,
+      EntryEventImpl clonedEvent) {
+    if (isRunning()) {
+      return true;
+    }
+    if (isPrimary()) {
+      recordDroppedEvent(clonedEvent);
+    }
+    if (isDebugEnabled) {
+      logger.debug("Returning back without putting into the gateway sender queue:" + event);
+    }
+    return false;
+  }
+
   private void recordDroppedEvent(EntryEventImpl event) {
     if (eventProcessor != null) {
       eventProcessor.registerEventDroppedInPrimaryQueue(event);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java
index 494e499..d18a9a5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java
@@ -853,7 +853,7 @@
     // If the message is an update, it may be conflatable. If it is a
     // create, destroy, invalidate or destroy-region, it is not conflatable.
     // Only updates are conflated.
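+    // An update that resolves a concurrency conflict is also not conflatable.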
-    return isUpdate();
+    return isUpdate() && !isConcurrencyConflict();
   }
 
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/internal/offheap/MemoryAllocatorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/offheap/MemoryAllocatorImpl.java
index 4e433e4..d78bc40 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/offheap/MemoryAllocatorImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/offheap/MemoryAllocatorImpl.java
@@ -20,10 +20,8 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
 
 import org.apache.logging.log4j.Logger;
 
@@ -39,7 +37,6 @@
 import org.apache.geode.internal.lang.SystemProperty;
 import org.apache.geode.internal.offheap.annotations.OffHeapIdentifier;
 import org.apache.geode.internal.offheap.annotations.Unretained;
-import org.apache.geode.logging.internal.executors.LoggingExecutors;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 import org.apache.geode.util.internal.GeodeGlossary;
 
@@ -64,14 +61,14 @@
       SystemProperty.getProductIntegerProperty(
           "off-heap-stats-update-frequency-ms").orElse(3600000);
 
-  private final ScheduledExecutorService updateNonRealTimeStatsExecutor;
-
-  private final ScheduledFuture<?> updateNonRealTimeStatsFuture;
+  private final NonRealTimeStatsUpdater nonRealTimeStatsUpdater;
 
   private volatile OffHeapMemoryStats stats;
 
   private volatile OutOfOffHeapMemoryListener ooohml;
 
+  private final int updateOffHeapStatsFrequencyMs;
+
   OutOfOffHeapMemoryListener getOutOfOffHeapMemoryListener() {
     return ooohml;
   }
@@ -98,20 +95,17 @@
 
   public static MemoryAllocator create(OutOfOffHeapMemoryListener ooohml, OffHeapMemoryStats stats,
       int slabCount, long offHeapMemorySize, long maxSlabSize,
-      int updateOffHeapStatsFrequencyMs) {
+      Supplier<Integer> updateOffHeapStatsFrequencyMsSupplier,
+      Supplier<NonRealTimeStatsUpdater> nonRealTimeStatsUpdaterSupplier) {
     return create(ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize, null,
-        SlabImpl::new, updateOffHeapStatsFrequencyMs);
+        SlabImpl::new, updateOffHeapStatsFrequencyMsSupplier, nonRealTimeStatsUpdaterSupplier);
   }
 
-  public static MemoryAllocator create(OutOfOffHeapMemoryListener ooohml, OffHeapMemoryStats stats,
-      int slabCount, long offHeapMemorySize, long maxSlabSize) {
-    return create(ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize, null,
-        SlabImpl::new, UPDATE_OFF_HEAP_STATS_FREQUENCY_MS);
-  }
-
-  private static MemoryAllocatorImpl create(OutOfOffHeapMemoryListener ooohml,
+  static MemoryAllocatorImpl create(OutOfOffHeapMemoryListener ooohml,
       OffHeapMemoryStats stats, int slabCount, long offHeapMemorySize, long maxSlabSize,
-      Slab[] slabs, SlabFactory slabFactory, int updateOffHeapStatsFrequencyMs) {
+      Slab[] slabs, SlabFactory slabFactory,
+      Supplier<Integer> updateOffHeapStatsFrequencyMsSupplier,
+      Supplier<NonRealTimeStatsUpdater> nonRealTimeStatsUpdaterSupplier) {
     MemoryAllocatorImpl result = singleton;
     boolean created = false;
     try {
@@ -155,7 +149,10 @@
           }
         }
 
-        result = new MemoryAllocatorImpl(ooohml, stats, slabs, updateOffHeapStatsFrequencyMs);
+        result = new MemoryAllocatorImpl(ooohml, stats, slabs,
+            updateOffHeapStatsFrequencyMsSupplier == null ? UPDATE_OFF_HEAP_STATS_FREQUENCY_MS
+                : updateOffHeapStatsFrequencyMsSupplier.get(),
+            nonRealTimeStatsUpdaterSupplier);
         singleton = result;
         LifecycleListener.invokeAfterCreate(result);
         created = true;
@@ -170,16 +167,10 @@
         }
       }
     }
+    result.start();
     return result;
   }
 
-  static MemoryAllocatorImpl createForUnitTest(OutOfOffHeapMemoryListener ooohml,
-      OffHeapMemoryStats stats, int slabCount, long offHeapMemorySize, long maxSlabSize,
-      SlabFactory memChunkFactory) {
-    return create(ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize, null, memChunkFactory,
-        UPDATE_OFF_HEAP_STATS_FREQUENCY_MS);
-  }
-
   public static MemoryAllocatorImpl createForUnitTest(OutOfOffHeapMemoryListener oooml,
       OffHeapMemoryStats stats, Slab[] slabs) {
     int slabCount = 0;
@@ -196,10 +187,9 @@
       }
     }
     return create(oooml, stats, slabCount, offHeapMemorySize, maxSlabSize, slabs, null,
-        UPDATE_OFF_HEAP_STATS_FREQUENCY_MS);
+        null, () -> new DummyNonRealTimeStatsUpdater());
   }
 
-
   private void reuse(OutOfOffHeapMemoryListener oooml, OffHeapMemoryStats newStats,
       long offHeapMemorySize, Slab[] slabs) {
     if (isClosed()) {
@@ -223,7 +213,8 @@
 
   private MemoryAllocatorImpl(final OutOfOffHeapMemoryListener oooml,
       final OffHeapMemoryStats stats, final Slab[] slabs,
-      int updateOffHeapStatsFrequencyMs) {
+      int updateOffHeapStatsFrequencyMs,
+      Supplier<NonRealTimeStatsUpdater> nonRealTimeStatsUpdaterSupplier) {
     if (oooml == null) {
       throw new IllegalArgumentException("OutOfOffHeapMemoryListener is null");
     }
@@ -239,11 +230,17 @@
     this.stats.incMaxMemory(freeList.getTotalMemory());
     this.stats.incFreeMemory(freeList.getTotalMemory());
 
-    updateNonRealTimeStatsExecutor =
-        LoggingExecutors.newSingleThreadScheduledExecutor("Update Freelist Stats thread");
-    updateNonRealTimeStatsFuture =
-        updateNonRealTimeStatsExecutor.scheduleAtFixedRate(freeList::updateNonRealTimeStats, 0,
-            updateOffHeapStatsFrequencyMs, TimeUnit.MILLISECONDS);
+    this.updateOffHeapStatsFrequencyMs = updateOffHeapStatsFrequencyMs;
+
+    if (nonRealTimeStatsUpdaterSupplier == null) {
+      nonRealTimeStatsUpdater = new NonRealTimeStatsUpdater(freeList::updateNonRealTimeStats);
+    } else {
+      nonRealTimeStatsUpdater = nonRealTimeStatsUpdaterSupplier.get();
+    }
+  }
+
+  void start() {
+    nonRealTimeStatsUpdater.start(updateOffHeapStatsFrequencyMs);
   }
 
   public List<OffHeapStoredObject> getLostChunks(InternalCache cache) {
@@ -407,8 +404,9 @@
     if (setClosed()) {
       freeList.freeSlabs();
       stats.close();
-      updateNonRealTimeStatsFuture.cancel(true);
-      updateNonRealTimeStatsExecutor.shutdown();
+      if (nonRealTimeStatsUpdater != null) {
+        nonRealTimeStatsUpdater.stop();
+      }
       singleton = null;
     }
   }
@@ -540,4 +538,16 @@
   public MemoryInspector getMemoryInspector() {
     return memoryInspector;
   }
+
+  public static class DummyNonRealTimeStatsUpdater extends NonRealTimeStatsUpdater {
+    public DummyNonRealTimeStatsUpdater() {
+      super(null);
+    }
+
+    @Override
+    void start(int frequency) {}
+
+    @Override
+    void stop() {}
+  }
 }
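
With this refactor, create() takes Supplier arguments for both the stats-update frequency and the updater itself (a null supplier falls back to the built-in default), and start() is invoked on the result before it is returned. A hedged sketch of how a test might use the new parameters, with the create() signature and DummyNonRealTimeStatsUpdater taken from the diff and the listener/stats arguments standing in for whatever mocks the test already has:

    MemoryAllocator allocator = MemoryAllocatorImpl.create(
        ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize,
        () -> 60_000, // refresh non-real-time stats every minute
        MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater::new); // no-op updater for tests
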
diff --git a/geode-core/src/main/java/org/apache/geode/internal/offheap/NonRealTimeStatsUpdater.java b/geode-core/src/main/java/org/apache/geode/internal/offheap/NonRealTimeStatsUpdater.java
new file mode 100644
index 0000000..933b28b
--- /dev/null
+++ b/geode-core/src/main/java/org/apache/geode/internal/offheap/NonRealTimeStatsUpdater.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.offheap;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.geode.logging.internal.executors.LoggingExecutors;
+
+public class NonRealTimeStatsUpdater {
+  private final Runnable updateTask;
+  private final ScheduledExecutorService updateNonRealTimeStatsExecutor;
+  private final AtomicReference<ScheduledFuture<?>> updateNonRealTimeStatsFuture =
+      new AtomicReference<>();
+
+  NonRealTimeStatsUpdater(Runnable updateTask) {
+    this.updateTask = updateTask;
+    updateNonRealTimeStatsExecutor =
+        LoggingExecutors.newSingleThreadScheduledExecutor("Update Freelist Stats thread");
+  }
+
+  void start(int updateOffHeapStatsFrequencyMs) {
+    updateNonRealTimeStatsFuture
+        .set(updateNonRealTimeStatsExecutor.scheduleAtFixedRate(updateTask, 0,
+            updateOffHeapStatsFrequencyMs, TimeUnit.MILLISECONDS));
+  }
+
+  void stop() {
+    updateNonRealTimeStatsFuture.get().cancel(true);
+    updateNonRealTimeStatsExecutor.shutdown();
+  }
+}
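
The new class extracts the scheduled executor that MemoryAllocatorImpl previously owned directly. Its lifecycle, as driven by the allocator above (the constructor, start, and stop are package-private, so this in-package sketch assumes the freeList task named in the diff):

    NonRealTimeStatsUpdater updater =
        new NonRealTimeStatsUpdater(freeList::updateNonRealTimeStats);
    updater.start(3_600_000); // default frequency from MemoryAllocatorImpl, in ms
    // ... on close():
    updater.stop(); // cancels the scheduled future and shuts down the executor

Note that stop() dereferences the future set by start(), so it assumes start() ran first; MemoryAllocatorImpl.create() guarantees that ordering.
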
diff --git a/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapStorage.java b/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapStorage.java
index 755fef9..2bbd587 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapStorage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapStorage.java
@@ -15,6 +15,7 @@
 package org.apache.geode.internal.offheap;
 
 import java.lang.reflect.Method;
+import java.util.function.Supplier;
 
 import org.apache.geode.StatisticDescriptor;
 import org.apache.geode.Statistics;
@@ -219,22 +220,12 @@
     // ooohml provides the hook for disconnecting and closing cache on OutOfOffHeapMemoryException
     OutOfOffHeapMemoryListener ooohml =
         new DisconnectingOutOfOffHeapMemoryListener((InternalDistributedSystem) system);
-    return basicCreateOffHeapStorage(sf, offHeapMemorySize, ooohml);
+    return basicCreateOffHeapStorage(sf, offHeapMemorySize, ooohml, null, null);
   }
 
   static MemoryAllocator basicCreateOffHeapStorage(StatisticsFactory sf, long offHeapMemorySize,
-      OutOfOffHeapMemoryListener ooohml) {
-    final OffHeapMemoryStats stats = new OffHeapStorage(sf);
-
-    final long maxSlabSize = calcMaxSlabSize(offHeapMemorySize);
-
-    final int slabCount = calcSlabCount(maxSlabSize, offHeapMemorySize);
-
-    return MemoryAllocatorImpl.create(ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize);
-  }
-
-  static MemoryAllocator basicCreateOffHeapStorage(StatisticsFactory sf, long offHeapMemorySize,
-      OutOfOffHeapMemoryListener ooohml, int updateOffHeapStatsFrequencyMs) {
+      OutOfOffHeapMemoryListener ooohml, Supplier<Integer> updateOffHeapStatsFrequencyMsSupplier,
+      Supplier<NonRealTimeStatsUpdater> nonRealTimeStatsUpdaterSupplier) {
     final OffHeapMemoryStats stats = new OffHeapStorage(sf);
 
     final long maxSlabSize = calcMaxSlabSize(offHeapMemorySize);
@@ -242,7 +233,7 @@
     final int slabCount = calcSlabCount(maxSlabSize, offHeapMemorySize);
 
     return MemoryAllocatorImpl.create(ooohml, stats, slabCount, offHeapMemorySize, maxSlabSize,
-        updateOffHeapStatsFrequencyMs);
+        updateOffHeapStatsFrequencyMsSupplier, nonRealTimeStatsUpdaterSupplier);
   }
 
   private static final long MAX_SLAB_SIZE = Integer.MAX_VALUE;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
index 44205d4..9e921d7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/Connection.java
@@ -961,7 +961,7 @@
       final ConnectionTable t,
       final boolean preserveOrder, final InternalDistributedMember remoteAddr,
       final boolean sharedResource,
-      final long startTime, final long ackTimeout, final long ackSATimeout)
+      final long startTime, final long ackTimeout, final long ackSATimeout, boolean doNotRetry)
       throws IOException, DistributedSystemDisconnectedException {
     boolean success = false;
     Connection conn = null;
@@ -1021,7 +1021,9 @@
             // do not change the text of this exception - it is looked for in exception handlers
             throw new IOException("Cannot form connection to alert listener " + remoteAddr);
           }
-
+          if (doNotRetry) {
+            throw new IOException("Connection not created in first try to " + remoteAddr);
+          }
           // Wait briefly...
           interrupted = Thread.interrupted() || interrupted;
           try {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/ConnectionTable.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/ConnectionTable.java
index f54f7bd..f1d157d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/ConnectionTable.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/ConnectionTable.java
@@ -269,6 +269,7 @@
    * @param startTime the ms clock start time for the operation
    * @param ackThreshold the ms ack-wait-threshold, or zero
    * @param ackSAThreshold the ms ack-severe_alert-threshold, or zero
+   * @param doNotRetry if true, make only a single attempt to create the connection
    * @return the Connection, or null if someone else already created or closed it
    * @throws IOException if unable to connect
    */
@@ -276,13 +277,14 @@
       boolean sharedResource,
       boolean preserveOrder, Map<DistributedMember, Object> m, PendingConnection pc, long startTime,
       long ackThreshold,
-      long ackSAThreshold) throws IOException, DistributedSystemDisconnectedException {
+      long ackSAThreshold, boolean doNotRetry)
+      throws IOException, DistributedSystemDisconnectedException {
     // handle new pending connection
     Connection con = null;
     try {
       long senderCreateStartTime = owner.getStats().startSenderCreate();
       con = Connection.createSender(owner.getMembership(), this, preserveOrder, id,
-          sharedResource, startTime, ackThreshold, ackSAThreshold);
+          sharedResource, startTime, ackThreshold, ackSAThreshold, doNotRetry);
       owner.getStats().incSenders(sharedResource, preserveOrder, senderCreateStartTime);
     } finally {
       // our connection failed to notify anyone waiting for our pending con
@@ -350,11 +352,14 @@
    * @param startTime the ms clock start time for the operation
    * @param ackTimeout the ms ack-wait-threshold, or zero
    * @param ackSATimeout the ms ack-severe-alert-threshold, or zero
+   * @param doNotRetryWaitForConnection if true, fail immediately instead of retrying or
+   *        waiting for a pending connection
    * @return the new Connection, or null if an error
    * @throws IOException if unable to create the connection
    */
   private Connection getSharedConnection(InternalDistributedMember id, boolean scheduleTimeout,
-      boolean preserveOrder, long startTime, long ackTimeout, long ackSATimeout)
+      boolean preserveOrder, long startTime, long ackTimeout, long ackSATimeout,
+      boolean doNotRetryWaitForConnection)
       throws IOException, DistributedSystemDisconnectedException {
 
     final Map<DistributedMember, Object> m =
@@ -387,7 +392,7 @@
         logger.debug("created PendingConnection {}", pc);
       }
       result = handleNewPendingConnection(id, true, preserveOrder, m, pc,
-          startTime, ackTimeout, ackSATimeout);
+          startTime, ackTimeout, ackSATimeout, doNotRetryWaitForConnection);
       if (!preserveOrder && scheduleTimeout) {
         scheduleIdleTimeout(result);
       }
@@ -400,6 +405,10 @@
           throw new IOException("Cannot form connection to alert listener " + id);
         }
 
+        if (doNotRetryWaitForConnection) {
+          return null;
+        }
+
         result = ((PendingConnection) mEntry).waitForConnect(owner.getMembership(),
             startTime, ackTimeout, ackSATimeout);
         if (logger.isDebugEnabled()) {
@@ -425,11 +434,13 @@
    * @param startTime the ms clock start time for the operation
    * @param ackTimeout the ms ack-wait-threshold, or zero
    * @param ackSATimeout the ms ack-severe-alert-threshold, or zero
+   * @param doNotRetry if true, make only a single attempt to create the connection
    * @return the connection, or null if an error
    * @throws IOException if the connection could not be created
    */
   Connection getThreadOwnedConnection(InternalDistributedMember id, long startTime, long ackTimeout,
-      long ackSATimeout) throws IOException, DistributedSystemDisconnectedException {
+      long ackSATimeout, boolean doNotRetry)
+      throws IOException, DistributedSystemDisconnectedException {
     Connection result;
 
     // Look for result in the thread local
@@ -449,7 +460,7 @@
     // OK, we have to create a new connection.
     long senderCreateStartTime = owner.getStats().startSenderCreate();
     result = Connection.createSender(owner.getMembership(), this, true, id, false, startTime,
-        ackTimeout, ackSATimeout);
+        ackTimeout, ackSATimeout, doNotRetry);
     owner.getStats().incSenders(false, true, senderCreateStartTime);
     if (logger.isDebugEnabled()) {
       logger.debug("ConnectionTable: created an ordered connection: {}", result);
@@ -521,11 +532,12 @@
    * @param startTime the ms clock start time
    * @param ackTimeout the ms ack-wait-threshold, or zero
    * @param ackSATimeout the ms ack-severe-alert-threshold, or zero
+   * @param doNotRetry if true, make only a single attempt to create the connection
    * @return the new Connection, or null if a problem
    * @throws IOException if the connection could not be created
    */
   protected Connection get(InternalDistributedMember id, boolean preserveOrder, long startTime,
-      long ackTimeout, long ackSATimeout)
+      long ackTimeout, long ackSATimeout, boolean doNotRetry)
       throws IOException, DistributedSystemDisconnectedException {
     if (closed) {
       owner.getCancelCriterion().checkCancelInProgress(null);
@@ -535,9 +547,9 @@
     boolean threadOwnsResources = threadOwnsResources();
     if (!preserveOrder || !threadOwnsResources) {
       result = getSharedConnection(id, threadOwnsResources, preserveOrder, startTime, ackTimeout,
-          ackSATimeout);
+          ackSATimeout, doNotRetry);
     } else {
-      result = getThreadOwnedConnection(id, startTime, ackTimeout, ackSATimeout);
+      result = getThreadOwnedConnection(id, startTime, ackTimeout, ackSATimeout, doNotRetry);
     }
     if (result != null) {
       Assert.assertTrue(result.getPreserveOrder() == preserveOrder);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java b/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
index 4d6d9c8..843b49c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/tcp/TCPConduit.java
@@ -719,7 +719,6 @@
    *
    * @param memberAddress the IDS associated with the remoteId
    * @param preserveOrder whether this is an ordered or unordered connection
-   * @param retry false if this is the first attempt
    * @param startTime the time this operation started
    * @param ackTimeout the ack-wait-threshold * 1000 for the operation to be transmitted (or zero)
    * @param ackSATimeout the ack-severe-alert-threshold * 1000 for the operation to be transmitted
@@ -728,7 +727,7 @@
    * @return the connection
    */
   public Connection getConnection(InternalDistributedMember memberAddress,
-      final boolean preserveOrder, boolean retry, long startTime, long ackTimeout,
+      final boolean preserveOrder, long startTime, long ackTimeout,
       long ackSATimeout) throws IOException, DistributedSystemDisconnectedException {
     if (stopped) {
       throw new DistributedSystemDisconnectedException("The conduit is stopped");
@@ -742,7 +741,7 @@
       try {
         // If this is the second time through this loop, we had problems.
         // Tear down the connection so that it gets rebuilt.
-        if (retry || conn != null) { // not first time in loop
+        if (conn != null) { // not first time in loop
           if (!membership.memberExists(memberAddress)
               || membership.isShunned(memberAddress)
               || membership.shutdownInProgress()) {
@@ -777,18 +776,15 @@
 
           // Close the connection (it will get rebuilt later).
           getStats().incReconnectAttempts();
-          if (conn != null) {
-            try {
-              if (logger.isDebugEnabled()) {
-                logger.debug("Closing old connection.  conn={} before retrying. memberInTrouble={}",
-                    conn, memberInTrouble);
-              }
-              conn.closeForReconnect("closing before retrying");
-            } catch (CancelException ex) {
-              throw ex;
-            } catch (Exception ex) {
-              // ignored
+          try {
+            if (logger.isDebugEnabled()) {
+              logger.debug("Closing old connection.  conn={} before retrying. memberInTrouble={}",
+                  conn, memberInTrouble);
             }
+            conn.closeForReconnect("closing before retrying");
+          } catch (CancelException ex) {
+            throw ex;
+          } catch (Exception ignored) {
           }
         } // not first time in loop
 
@@ -801,7 +797,7 @@
           do {
             retryForOldConnection = false;
             conn = getConTable().get(memberAddress, preserveOrder, startTime, ackTimeout,
-                ackSATimeout);
+                ackSATimeout, false);
             if (conn == null) {
               // conduit may be closed - otherwise an ioexception would be thrown
               problem = new IOException(
@@ -909,6 +905,98 @@
     }
   }
 
+  /**
+   * Return a connection to the given member. This method performs a quick scan for an existing
+   * connection and makes at most one attempt to create a new one.
+   *
+   * @param memberAddress the IDS associated with the remoteId
+   * @param preserveOrder whether this is an ordered or unordered connection
+   * @param startTime the time this operation started
+   * @param ackTimeout the ack-wait-threshold * 1000 for the operation to be transmitted (or zero)
+   * @param ackSATimeout the ack-severe-alert-threshold * 1000 for the operation to be transmitted
+   *        (or zero)
+   *
+   * @return the connection
+   */
+  public Connection getFirstScanForConnection(InternalDistributedMember memberAddress,
+      final boolean preserveOrder, long startTime, long ackTimeout,
+      long ackSATimeout) throws IOException, DistributedSystemDisconnectedException {
+    if (stopped) {
+      throw new DistributedSystemDisconnectedException("The conduit is stopped");
+    }
+
+    Connection connection = null;
+    stopper.checkCancelInProgress(null);
+    boolean interrupted = Thread.interrupted();
+    try {
+
+      Exception problem = null;
+      try {
+        connection = getConnectionThatIsNotClosed(memberAddress, preserveOrder, startTime,
+            ackTimeout, ackSATimeout);
+
+        // we have a connection; fall through and return it
+      } catch (ConnectionException e) {
+        // Race condition between acquiring the connection and attempting
+        // to use it: another thread closed it.
+        problem = e;
+        // No retry here: this method deliberately makes a single attempt,
+        // so treat the member as unreachable for now even though it may
+        // still be in the view
+      } catch (IOException e) {
+        problem = e;
+        // don't keep trying to connect to an alert listener
+        if (AlertingAction.isThreadAlerting()) {
+          if (logger.isDebugEnabled()) {
+            logger.debug("Giving up connecting to alert listener {}", memberAddress);
+          }
+        }
+      }
+
+      if (problem != null) {
+        if (problem instanceof IOException) {
+          if (problem.getMessage() != null
+              && problem.getMessage().startsWith("Cannot form connection to alert listener")) {
+            throw new AlertingIOException((IOException) problem);
+          }
+          throw (IOException) problem;
+        }
+        throw new IOException(
+            String.format("Problem connecting to %s", memberAddress), problem);
+      }
+      // Success!
+
+      return connection;
+    } finally {
+      if (interrupted) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  private Connection getConnectionThatIsNotClosed(InternalDistributedMember memberAddress,
+      final boolean preserveOrder, long startTime, long ackTimeout, long ackSATimeout)
+      throws IOException, ConnectionException {
+    boolean debugEnabled = logger.isDebugEnabled();
+    Connection connection;
+    while (true) {
+      connection = getConTable().get(memberAddress, preserveOrder, startTime, ackTimeout,
+          ackSATimeout, true);
+      if (connection == null) {
+        throw new IOException("Unable to reconnect to server; possible shutdown: " + memberAddress);
+      }
+
+      if (!connection.isClosing() && connection.getRemoteAddress().equals(memberAddress)) {
+        return connection;
+      }
+      if (debugEnabled) {
+        logger.debug("Got an old connection for {}: {}@{}", memberAddress, connection,
+            connection.hashCode());
+      }
+      connection.closeOldConnection("closing old connection");
+    }
+  }
+
   @Override
   public String toString() {
     return String.valueOf(id);
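
getFirstScanForConnection() above is a fail-fast counterpart to getConnection(): one pass through the connection table with doNotRetry=true, throwing IOException instead of looping on failure. A hedged usage sketch (the method and its signature come from the hunk; the conduit/member variables and the fallback choice are illustrative):

    try {
      Connection conn = conduit.getFirstScanForConnection(
          member, true /* preserveOrder */, System.currentTimeMillis(),
          0 /* ackTimeout */, 0 /* ackSATimeout */);
      // the single attempt succeeded; conn is open and bound to member
    } catch (IOException e) {
      // member unreachable on the first try; the caller can fall back to the
      // retrying getConnection(...) path or give up
    }
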
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionQueueJUnitTest.java
index 6643695..1dcbdef 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/BucketRegionQueueJUnitTest.java
@@ -17,7 +17,6 @@
 import static org.apache.geode.cache.Region.SEPARATOR;
 import static org.apache.geode.internal.statistics.StatisticsClockFactory.disabledClock;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -175,23 +174,66 @@
     List<Object> objects = bucketRegionQueue.getElementsMatching(hasTransactionIdPredicate,
         isLastEventInTransactionPredicate);
 
-    assertEquals(2, objects.size());
-    assertEquals(objects, Arrays.asList(event1, event3));
+    assertThat(objects.size()).isEqualTo(2);
+    assertThat(objects).isEqualTo(Arrays.asList(event1, event3));
 
     objects = bucketRegionQueue.getElementsMatching(hasTransactionIdPredicate,
         isLastEventInTransactionPredicate);
-    assertEquals(1, objects.size());
-    assertEquals(objects, Arrays.asList(event7));
+    assertThat(objects.size()).isEqualTo(1);
+    assertThat(objects).isEqualTo(Arrays.asList(event7));
 
     hasTransactionIdPredicate =
         ParallelGatewaySenderQueue.getHasTransactionIdPredicate(tx2);
     objects = bucketRegionQueue.getElementsMatching(hasTransactionIdPredicate,
         isLastEventInTransactionPredicate);
-    assertEquals(2, objects.size());
-    assertEquals(objects, Arrays.asList(event2, event4));
+    assertThat(objects.size()).isEqualTo(2);
+    assertThat(objects).isEqualTo(Arrays.asList(event2, event4));
   }
 
   @Test
+  public void testGetElementsMatchingWithParallelGatewaySenderQueuePredicatesObjectReadNullDoesNotThrowException()
+      throws ForceReattemptException {
+    ParallelGatewaySenderHelper.createParallelGatewaySenderEventProcessor(this.sender);
+
+    TransactionId tx1 = new TXId(null, 1);
+    TransactionId tx2 = new TXId(null, 2);
+    TransactionId tx3 = new TXId(null, 3);
+
+    GatewaySenderEventImpl event1 = createMockGatewaySenderEvent(1, tx1, false);
+    GatewaySenderEventImpl eventNotInTransaction1 = createMockGatewaySenderEvent(2, null, false);
+    GatewaySenderEventImpl event2 = createMockGatewaySenderEvent(3, tx2, false);
+    GatewaySenderEventImpl event3 = null; // would be createMockGatewaySenderEvent(4, tx1, true); null exercises the null-read path
+    GatewaySenderEventImpl event4 = createMockGatewaySenderEvent(5, tx2, true);
+    GatewaySenderEventImpl event5 = createMockGatewaySenderEvent(6, tx3, false);
+    GatewaySenderEventImpl event6 = createMockGatewaySenderEvent(7, tx3, false);
+    GatewaySenderEventImpl event7 = createMockGatewaySenderEvent(8, tx1, true);
+
+    this.bucketRegionQueue
+        .cleanUpDestroyedTokensAndMarkGIIComplete(InitialImageOperation.GIIStatus.NO_GII);
+
+    this.bucketRegionQueue.addToQueue(1L, event1);
+    this.bucketRegionQueue.addToQueue(2L, eventNotInTransaction1);
+    this.bucketRegionQueue.addToQueue(3L, event2);
+    this.bucketRegionQueue.addToQueue(4L, event3);
+    this.bucketRegionQueue.addToQueue(5L, event4);
+    this.bucketRegionQueue.addToQueue(6L, event5);
+    this.bucketRegionQueue.addToQueue(7L, event6);
+    this.bucketRegionQueue.addToQueue(8L, event7);
+
+    Predicate<GatewaySenderEventImpl> hasTransactionIdPredicate =
+        ParallelGatewaySenderQueue.getHasTransactionIdPredicate(tx1);
+    Predicate<GatewaySenderEventImpl> isLastEventInTransactionPredicate =
+        ParallelGatewaySenderQueue.getIsLastEventInTransactionPredicate();
+    when(bucketRegionQueue.getValueInVMOrDiskWithoutFaultIn(4L)).thenReturn(null);
+    List<Object> objects = this.bucketRegionQueue.getElementsMatching(hasTransactionIdPredicate,
+        isLastEventInTransactionPredicate);
+
+    assertThat(objects.size()).isEqualTo(2);
+    assertThat(objects).isEqualTo(Arrays.asList(event1, event7));
+  }
+
+  @Test
   public void testPeekedElementsArePossibleDuplicate()
       throws Exception {
     ParallelGatewaySenderHelper.createParallelGatewaySenderEventProcessor(sender);
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
new file mode 100644
index 0000000..ff7e43e
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/OplogEntryIdSetDrfHashSetThresholdTest.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+
+import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
+import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
+import org.junit.jupiter.api.Test;
+import org.junitpioneer.jupiter.SetSystemProperty;
+
+import org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
+
+/**
+ * Tests DiskStoreImpl.OplogEntryIdSet
+ */
+public class OplogEntryIdSetDrfHashSetThresholdTest {
+  @Test
+  @SetSystemProperty(key = "gemfire.disk.drfHashMapOverflowThreshold", value = "10")
+  public void addMethodOverflowBasedOnDrfOverflowThresholdParameters() {
+
+    int testEntries = 41;
+    IntOpenHashSet intOpenHashSet = new IntOpenHashSet();
+    LongOpenHashSet longOpenHashSet = new LongOpenHashSet();
+
+    List<IntOpenHashSet> intOpenHashSets =
+        new ArrayList<>(Collections.singletonList(intOpenHashSet));
+    List<LongOpenHashSet> longOpenHashSets =
+        new ArrayList<>(Collections.singletonList(longOpenHashSet));
+
+    OplogEntryIdSet oplogEntryIdSet = new OplogEntryIdSet(intOpenHashSets, longOpenHashSets);
+    IntStream.range(1, testEntries).forEach(oplogEntryIdSet::add);
+    LongStream.range(0x00000000FFFFFFFFL + 1, 0x00000000FFFFFFFFL + testEntries)
+        .forEach(oplogEntryIdSet::add);
+
+    assertThat(intOpenHashSets).hasSize(4);
+    assertThat(longOpenHashSets).hasSize(4);
+
+    IntStream.range(1, testEntries).forEach(i -> assertThat(oplogEntryIdSet.contains(i)).isTrue());
+    LongStream.range(0x00000000FFFFFFFFL + 1, 0x00000000FFFFFFFFL + testEntries)
+        .forEach(i -> assertThat(oplogEntryIdSet.contains(i)).isTrue());
+
+  }
+}
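
Why the assertions above expect four sets on each side (the threshold and counts come from the test; the arithmetic is spelled out here, assuming each open-hash set overflows into a fresh one once it reaches the threshold):

    // gemfire.disk.drfHashMapOverflowThreshold = 10
    // IntStream.range(1, 41)        -> 40 int ids  -> 40 / 10 = 4 IntOpenHashSets
    // 40 long ids above 0xFFFFFFFFL -> 40 / 10 = 4 LongOpenHashSets
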
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/TombstoneServiceTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/TombstoneServiceTest.java
index 37bdfd6..9e6c437 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/TombstoneServiceTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/TombstoneServiceTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -40,7 +41,9 @@
   DistributedRegion region;
   VersionTag destroyedVersion;
   private TombstoneService.ReplicateTombstoneSweeper replicateTombstoneSweeper;
-  private TombstoneService.Tombstone tombstone;
+  private TombstoneService.Tombstone tombstone1;
+
+  private TombstoneService.Tombstone tombstone2;
 
 
   @Before
@@ -55,8 +58,9 @@
     destroyedVersion = mock(VersionTag.class);
     replicateTombstoneSweeper = new TombstoneService.ReplicateTombstoneSweeper(cacheTime, stats,
         cancelCriterion, executor);
-    tombstone = new TombstoneService.Tombstone(entry, region, destroyedVersion);
-    tombstone.entry = entry;
+    tombstone1 = new TombstoneService.Tombstone(entry, region, destroyedVersion);
+    tombstone2 = new TombstoneService.Tombstone(entry, region, destroyedVersion);
+    tombstone1.entry = entry;
   }
 
   @Test
@@ -64,9 +68,9 @@
     when(region.isInitialized()).thenReturn(false);
     when(region.getRegionMap()).thenReturn(regionMap);
 
-    replicateTombstoneSweeper.expireTombstone(tombstone);
+    replicateTombstoneSweeper.expireTombstone(tombstone1);
     replicateTombstoneSweeper.expireBatch();
-    verify(regionMap, Mockito.never()).removeTombstone(tombstone.entry, tombstone);
+    verify(regionMap, Mockito.never()).removeTombstone(tombstone1.entry, tombstone1);
   }
 
   @Test
@@ -80,8 +84,36 @@
     when(region.getDiskRegion()).thenReturn(mock(DiskRegion.class));
 
 
-    replicateTombstoneSweeper.expireTombstone(tombstone);
+    replicateTombstoneSweeper.expireTombstone(tombstone1);
     replicateTombstoneSweeper.expireBatch();
-    verify(regionMap).removeTombstone(tombstone.entry, tombstone);
+    verify(regionMap).removeTombstone(tombstone1.entry, tombstone1);
+  }
+
+  @Test
+  public void validateThatTheExpiredTombstonesAreCleared() {
+    when(region.getRegionMap()).thenReturn(regionMap);
+    replicateTombstoneSweeper.expireTombstone(tombstone1);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isOne();
+    replicateTombstoneSweeper.unscheduleTombstones(region);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isZero();
+  }
+
+  @Test
+  public void validateThatTheNonExpiredTombstonesAreCleared() {
+    when(region.getRegionMap()).thenReturn(regionMap);
+    replicateTombstoneSweeper.scheduleTombstone(tombstone1);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isOne();
+    replicateTombstoneSweeper.unscheduleTombstones(region);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isZero();
+  }
+
+  @Test
+  public void validateThatTheNonExpiredAndExpiredTombstonesAreCleared() {
+    when(region.getRegionMap()).thenReturn(regionMap);
+    replicateTombstoneSweeper.scheduleTombstone(tombstone1);
+    replicateTombstoneSweeper.expireTombstone(tombstone2);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isEqualTo(2);
+    replicateTombstoneSweeper.unscheduleTombstones(region);
+    assertThat(replicateTombstoneSweeper.getScheduledTombstoneCount()).isZero();
   }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSenderTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSenderTest.java
new file mode 100644
index 0000000..9ac049d
--- /dev/null
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSenderTest.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.geode.internal.cache.execute;
+
+import static org.apache.geode.internal.cache.execute.PartitionedRegionFunctionResultSenderTest.MethodToInvoke.LAST_RESULT;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.concurrent.TimeUnit;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+import org.junit.runner.RunWith;
+
+import org.apache.geode.cache.execute.Function;
+import org.apache.geode.cache.execute.FunctionContext;
+import org.apache.geode.cache.execute.FunctionException;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.distributed.internal.DistributionManager;
+import org.apache.geode.internal.cache.PartitionedRegion;
+import org.apache.geode.internal.cache.PartitionedRegionDataStore;
+import org.apache.geode.internal.cache.execute.metrics.FunctionStats;
+import org.apache.geode.test.junit.runners.GeodeParamsRunner;
+
+@RunWith(GeodeParamsRunner.class)
+public class PartitionedRegionFunctionResultSenderTest {
+  private DistributionManager dm = mock(DistributionManager.class);
+  private PartitionedRegion region = mock(PartitionedRegion.class);
+  private PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
+  private ATestResultCollector rc = new ATestResultCollector();
+  private ServerToClientFunctionResultSender serverToClientFunctionResultSender =
+      mock(ServerToClientFunctionResultSender.class);
+  private FunctionStats functionStats = mock(FunctionStats.class);
+
+  enum MethodToInvoke {
+    SEND_EXCEPTION, LAST_RESULT
+  }
+
+  @BeforeEach
+  public void setUp() {
+    when(region.getDataStore()).thenReturn(dataStore);
+    when(dataStore.areAllBucketsHosted(any())).thenReturn(true);
+  }
+
+  @Test
+  public void whenResponseToClientInLastResultFailsEndResultsIsCalled_NotOnlyLocal_OnlyRemote() {
+    doThrow(new FunctionException()).when(serverToClientFunctionResultSender)
+        .lastResult(any(), any());
+    PartitionedRegionFunctionResultSender sender =
+        new PartitionedRegionFunctionResultSender(dm,
+            region, 1, rc, serverToClientFunctionResultSender, false, true, true,
+            new TestFunction(), new int[2], null, (x, y) -> functionStats);
+
+    sender.lastResult(new Object(), true, rc, null);
+
+    assertThat(rc.isEndResultsCalled()).isTrue();
+  }
+
+  @ParameterizedTest(name = "{displayName} with {arguments}")
+  @EnumSource(MethodToInvoke.class)
+  public void whenResponseToClientInLastResultFailsEndResultsIsCalled_OnlyLocal_NotOnlyRemote(
+      MethodToInvoke methodToInvoke) {
+    doThrow(new FunctionException("IOException")).when(serverToClientFunctionResultSender)
+        .lastResult(any(), any());
+
+    PartitionedRegionFunctionResultSender sender =
+        new PartitionedRegionFunctionResultSender(dm,
+            region, 1, rc, serverToClientFunctionResultSender, true, false, true,
+            new TestFunction(), new int[2]);
+
+    if (methodToInvoke == LAST_RESULT) {
+      sender.lastResult(new Object());
+    } else {
+      sender.sendException(new Exception());
+    }
+
+    assertThat(rc.isEndResultsCalled()).isTrue();
+  }
+
+  @ParameterizedTest(name = "{displayName} with {arguments}")
+  @EnumSource(MethodToInvoke.class)
+  public void whenResponseToClientInSendResultFailsEndResultsIsCalled_NotOnlyLocal_OnlyRemote(
+      MethodToInvoke methodToInvoke) {
+    doThrow(new FunctionException("IOException")).when(serverToClientFunctionResultSender)
+        .sendResult(any(), any());
+    PartitionedRegionFunctionResultSender sender =
+        new PartitionedRegionFunctionResultSender(dm,
+            region, 1, rc, serverToClientFunctionResultSender, false, true, true,
+            new TestFunction(), new int[2], null, (x, y) -> functionStats);
+
+    if (methodToInvoke == LAST_RESULT) {
+      sender.lastResult(new Object());
+    } else {
+      sender.sendException(new Exception());
+    }
+
+    assertThat(rc.isEndResultsCalled()).isTrue();
+  }
+
+  @ParameterizedTest(name = "{displayName} with {arguments}")
+  @EnumSource(MethodToInvoke.class)
+  public void whenResponseToClientInSendResultFailsEndResultsIsCalled_NotOnlyLocal_NotOnlyRemote(
+      MethodToInvoke methodToInvoke) {
+    doThrow(new FunctionException("IOException")).when(serverToClientFunctionResultSender)
+        .sendResult(any(), any());
+    PartitionedRegionFunctionResultSender sender =
+        new PartitionedRegionFunctionResultSender(dm,
+            region, 1, rc, serverToClientFunctionResultSender, false, false, true,
+            new TestFunction(), new int[2], null, (x, y) -> functionStats);
+
+    if (methodToInvoke == LAST_RESULT) {
+      sender.lastResult(new Object(), true, rc, null);
+    } else {
+      sender.sendException(new Exception());
+    }
+
+    assertThat(rc.isEndResultsCalled()).isTrue();
+  }
+
+  private static class TestFunction implements Function {
+    @Override
+    public void execute(FunctionContext context) {}
+  }
+
+  private static class ATestResultCollector implements ResultCollector {
+    private volatile boolean isEndResultsCalled = false;
+
+    @Override
+    public Object getResult() throws FunctionException {
+      return null;
+    }
+
+    @Override
+    public Object getResult(long timeout, TimeUnit unit)
+        throws FunctionException, InterruptedException {
+      return null;
+    }
+
+    @Override
+    public void addResult(DistributedMember memberID, Object resultOfSingleExecution) {}
+
+    @Override
+    public void endResults() {
+      isEndResultsCalled = true;
+    }
+
+    @Override
+    public void clearResults() {}
+
+    public boolean isEndResultsCalled() {
+      return isEndResultsCalled;
+    }
+  }
+}
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderTest.java
index aac5f0d..d57ba5f 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderTest.java
@@ -18,15 +18,28 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
 
 import org.junit.Test;
 
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.Operation;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.wan.GatewayQueueEvent;
+import org.apache.geode.distributed.internal.DistributionAdvisor;
+import org.apache.geode.internal.cache.EntryEventImpl;
+import org.apache.geode.internal.cache.EnumListenerEvent;
+import org.apache.geode.internal.cache.InternalRegion;
+import org.apache.geode.internal.cache.KeyInfo;
 import org.apache.geode.internal.cache.RegionQueue;
 
 public class AbstractGatewaySenderTest {
@@ -58,4 +71,161 @@
 
     assertThat(event).isSameAs(gatewaySenderEvent);
   }
+
+  @Test
+  public void distributeFinishesWorkWhenInterrupted() throws InterruptedException {
+    DummyGatewaySenderEventProcessor processor = new DummyGatewaySenderEventProcessor();
+    TestableGatewaySender gatewaySender = new TestableGatewaySender(processor);
+    EnumListenerEvent operationType = EnumListenerEvent.AFTER_CREATE;
+    EntryEventImpl event = mock(EntryEventImpl.class);
+    when(event.getKeyInfo()).thenReturn(mock(KeyInfo.class));
+    Operation operation = mock(Operation.class);
+    when(operation.isLocal()).thenReturn(false);
+    when(operation.isExpiration()).thenReturn(false);
+    when(event.getOperation()).thenReturn(operation);
+    InternalRegion region = mock(InternalRegion.class);
+    when(region.getDataPolicy()).thenReturn(DataPolicy.PARTITION);
+    when(event.getRegion()).thenReturn(region);
+    List<Integer> allRemoteDSIds = Collections.singletonList(1);
+
+    CountDownLatch lockAcquiredLatch = new CountDownLatch(1);
+    CountDownLatch unlockLatch = new CountDownLatch(1);
+
+    // Acquire lifeCycleLock in write mode in a new thread so that
+    // the thread calling distribute cannot take the read lock
+    Thread thread = new Thread(() -> {
+      gatewaySender.getLifeCycleLock().writeLock().lock();
+      lockAcquiredLatch.countDown();
+      try {
+        unlockLatch.await();
+      } catch (InterruptedException ignore) {
+      }
+      gatewaySender.getLifeCycleLock().writeLock().unlock();
+    });
+    thread.start();
+    lockAcquiredLatch.await();
+
+    // Set the interrupt flag and then call distribute
+    Thread.currentThread().interrupt();
+    gatewaySender.distribute(operationType, event, allRemoteDSIds, true);
+
+    unlockLatch.countDown();
+
+    // Check that the interrupt flag has been restored
+    assertThat(Thread.currentThread().isInterrupted()).isTrue();
+    // Check that the work finished even though the interrupt flag was set
+    assertThat(processor.getTimesRegisterEventDroppedInPrimaryQueueCalled()).isEqualTo(1);
+  }
+
+  public static class TestableGatewaySender extends AbstractGatewaySender {
+    private int isRunningTimesCalled = 0;
+
+    public TestableGatewaySender(AbstractGatewaySenderEventProcessor eventProcessor) {
+      this.eventProcessor = eventProcessor;
+      enqueuedAllTempQueueEvents = true;
+    }
+
+    @Override
+    public void fillInProfile(DistributionAdvisor.Profile profile) {}
+
+    @Override
+    public void start() {}
+
+    @Override
+    public boolean isPrimary() {
+      return true;
+    }
+
+    @Override
+    public void startWithCleanQueue() {}
+
+    @Override
+    public void stop() {}
+
+    @Override
+    public void setModifiedEventId(EntryEventImpl clonedEvent) {}
+
+    @Override
+    public GatewaySenderStats getStatistics() {
+      return mock(GatewaySenderStats.class);
+    }
+
+    @Override
+    public GatewaySenderAdvisor getSenderAdvisor() {
+      return mock(GatewaySenderAdvisor.class);
+    }
+
+    @Override
+    public boolean isRunning() {
+      // report running only on the first call; later checks see a stopped sender
+      return isRunningTimesCalled++ == 0;
+    }
+
+    @Override
+    public String getId() {
+      return "test";
+    }
+  }
+
+  public static class DummyGatewaySenderEventProcessor extends AbstractGatewaySenderEventProcessor {
+
+    private int timesEnqueueEventCalled = 0;
+    private int timesRegisterEventDroppedInPrimaryQueueCalled = 0;
+
+    public DummyGatewaySenderEventProcessor() {
+      super("", new DummyGatewaySender(), null);
+    }
+
+    @Override
+    public void enqueueEvent(EnumListenerEvent operation, EntryEvent event, Object substituteValue,
+        boolean isLastEventInTransaction) throws IOException, CacheException {
+      timesEnqueueEventCalled++;
+    }
+
+    public int getTimesEnqueueEventCalled() {
+      return timesEnqueueEventCalled;
+    }
+
+    @Override
+    protected void initializeMessageQueue(String id, boolean cleanQueues) {}
+
+    @Override
+    protected void rebalance() {}
+
+    public int getTimesRegisterEventDroppedInPrimaryQueueCalled() {
+      return timesRegisterEventDroppedInPrimaryQueueCalled;
+    }
+
+    @Override
+    protected void registerEventDroppedInPrimaryQueue(EntryEventImpl droppedEvent) {
+      timesRegisterEventDroppedInPrimaryQueueCalled++;
+    }
+
+    @Override
+    public void initializeEventDispatcher() {}
+
+    @Override
+    protected void enqueueEvent(GatewayQueueEvent event) {}
+  }
+
+  public static class DummyGatewaySender extends AbstractGatewaySender {
+    @Override
+    public void fillInProfile(DistributionAdvisor.Profile profile) {}
+
+    @Override
+    public void start() {}
+
+    @Override
+    public void startWithCleanQueue() {}
+
+    @Override
+    public void stop() {}
+
+    @Override
+    public void setModifiedEventId(EntryEventImpl clonedEvent) {}
+
+  }
 }
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImplTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImplTest.java
index cec3e4b..cf1f5d1 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImplTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImplTest.java
@@ -33,13 +33,15 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.stream.Stream;
 
-import junitparams.Parameters;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.junit.runner.RunWith;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.junit.jupiter.params.provider.MethodSource;
 
 import org.apache.geode.cache.Operation;
 import org.apache.geode.cache.TransactionId;
@@ -61,18 +63,16 @@
 import org.apache.geode.internal.serialization.VersionedDataOutputStream;
 import org.apache.geode.internal.util.BlobHelper;
 import org.apache.geode.test.fake.Fakes;
-import org.apache.geode.test.junit.runners.GeodeParamsRunner;
 
-@RunWith(GeodeParamsRunner.class)
 public class GatewaySenderEventImplTest {
 
   private GemFireCacheImpl cache;
 
-  @Rule
-  public TestName testName = new TestName();
+  private String testName;
 
-  @Before
-  public void setUpGemFire() {
+  @BeforeEach
+  public void setUpGemFire(TestInfo testInfo) {
+    testName = testInfo.getDisplayName();
     createCache();
   }
 
@@ -110,8 +110,8 @@
     assertThat(gatewaySenderEvent.getTransactionId()).isNotNull();
   }
 
-  @Test
-  @Parameters(method = "getVersionsAndExpectedInvocations")
+  @ParameterizedTest
+  @MethodSource("getVersionsAndExpectedInvocations")
   public void testSerializingDataFromCurrentVersionToOldVersion(VersionAndExpectedInvocations vaei)
       throws IOException {
     GatewaySenderEventImpl gatewaySenderEvent = spy(GatewaySenderEventImpl.class);
@@ -129,8 +129,8 @@
         any());
   }
 
-  @Test
-  @Parameters(method = "getVersionsAndExpectedInvocations")
+  @ParameterizedTest
+  @MethodSource("getVersionsAndExpectedInvocations")
   public void testDeserializingDataFromOldVersionToCurrentVersion(
       VersionAndExpectedInvocations vaei)
       throws IOException, ClassNotFoundException {
@@ -151,18 +151,17 @@
         any());
   }
 
-  private VersionAndExpectedInvocations[] getVersionsAndExpectedInvocations() {
-    return new VersionAndExpectedInvocations[] {
-        new VersionAndExpectedInvocations(GEODE_1_8_0, 1, 0, 0),
-        new VersionAndExpectedInvocations(GEODE_1_13_0, 1, 1, 0),
-        new VersionAndExpectedInvocations(GEODE_1_14_0, 1, 1, 1)
-    };
+  private static Stream<Arguments> getVersionsAndExpectedInvocations() {
+    return Stream.of(
+        Arguments.of(new VersionAndExpectedInvocations(GEODE_1_8_0, 1, 0, 0)),
+        Arguments.of(new VersionAndExpectedInvocations(GEODE_1_13_0, 1, 1, 0)),
+        Arguments.of(new VersionAndExpectedInvocations(GEODE_1_14_0, 1, 1, 1)));
   }
 
   @Test
   public void testEquality() throws Exception {
     LocalRegion region = mock(LocalRegion.class);
-    when(region.getFullPath()).thenReturn(testName.getMethodName() + "_region");
+    when(region.getFullPath()).thenReturn(testName + "_region");
     when(region.getCache()).thenReturn(cache);
     Object event = ParallelGatewaySenderHelper.createGatewaySenderEvent(region, Operation.CREATE,
         "key1", "value1", 0, 0, 0, 0);
@@ -209,7 +208,7 @@
     assertThat(event).isNotEqualTo(eventDifferentValue);
 
     LocalRegion region2 = mock(LocalRegion.class);
-    when(region2.getFullPath()).thenReturn(testName.getMethodName() + "_region2");
+    when(region2.getFullPath()).thenReturn(testName + "_region2");
     when(region2.getCache()).thenReturn(cache);
     Object eventDifferentRegion =
         ParallelGatewaySenderHelper.createGatewaySenderEvent(region2, Operation.CREATE,
@@ -221,7 +220,7 @@
   public void testSerialization() throws Exception {
     // Set up test
     LocalRegion region = mock(LocalRegion.class);
-    when(region.getFullPath()).thenReturn(testName.getMethodName() + "_region");
+    when(region.getFullPath()).thenReturn(testName + "_region");
     when(region.getCache()).thenReturn(cache);
     TXId txId = new TXId(cache.getMyId(), 0);
     when(region.getTXId()).thenReturn(txId);
@@ -348,12 +347,13 @@
     return cacheEvent;
   }
 
-  @Parameters({"true, true", "true, false", "false, false"})
+  @ParameterizedTest
+  @CsvSource({"true,true", "true,false", "false,false"})
   public void testCreation_WithAfterUpdateWithGenerateCallbacks(boolean isGenerateCallbacks,
       boolean isCallbackArgumentNull)
       throws IOException {
-    InternalRegion region = mock(InternalRegion.class);
-    when(region.getFullPath()).thenReturn(testName.getMethodName() + "_region");
+    InternalRegion region = mock(LocalRegion.class);
+    when(region.getFullPath()).thenReturn(testName + "_region");
 
     Operation operation = mock(Operation.class);
     when(operation.isLocalLoad()).thenReturn(true);
@@ -377,6 +377,37 @@
     assertThat(event.getAction()).isEqualTo(action);
   }
 
+  @Test
+  public void testShouldNotBeConflatedCreate() throws IOException {
+    final EntryEventImpl cacheEvent = mockEntryEventImpl(mock(TransactionId.class));
+
+    final GatewaySenderEventImpl gatewaySenderEvent =
+        new GatewaySenderEventImpl(EnumListenerEvent.AFTER_CREATE, cacheEvent, null, INCLUDE);
+
+    assertThat(gatewaySenderEvent.shouldBeConflated()).isFalse();
+  }
+
+  @Test
+  public void testShouldBeConflatedUpdate() throws IOException {
+    final EntryEventImpl cacheEvent = mockEntryEventImpl(mock(TransactionId.class));
+
+    final GatewaySenderEventImpl gatewaySenderEvent =
+        new GatewaySenderEventImpl(EnumListenerEvent.AFTER_UPDATE, cacheEvent, null, INCLUDE);
+
+    assertThat(gatewaySenderEvent.shouldBeConflated()).isTrue();
+  }
+
+  @Test
+  public void testShouldNotBeConflatedUpdateConcurrentConflict() throws IOException {
+    final EntryEventImpl cacheEvent = mockEntryEventImpl(mock(TransactionId.class));
+    when(cacheEvent.isConcurrencyConflict()).thenReturn(true);
+
+    final GatewaySenderEventImpl gatewaySenderEvent =
+        new GatewaySenderEventImpl(EnumListenerEvent.AFTER_UPDATE, cacheEvent, null, INCLUDE);
+
+    assertThat(gatewaySenderEvent.shouldBeConflated()).isFalse();
+  }
+
   public static class VersionAndExpectedInvocations {
 
     private final KnownVersion version;
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueueJUnitTest.java
index 88ec275..46f390a 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueueJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/serial/SerialGatewaySenderQueueJUnitTest.java
@@ -15,7 +15,6 @@
 package org.apache.geode.internal.cache.wan.serial;
 
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
@@ -111,9 +110,9 @@
     queue.setGroupTransactionEvents(true);
 
     List<AsyncEvent<?, ?>> peeked = queue.peek(3, 100);
-    assertEquals(4, peeked.size());
+    assertThat(peeked.size()).isEqualTo(4);
     List<AsyncEvent<?, ?>> peekedAfter = queue.peek(3, 100);
-    assertEquals(3, peekedAfter.size());
+    assertThat(peekedAfter.size()).isEqualTo(3);
   }
 
   @Test
@@ -146,7 +145,7 @@
             .when(queue).getElementsMatching(any(), any(), anyLong());
 
     List<AsyncEvent<?, ?>> peeked = queue.peek(-1, 1);
-    assertEquals(4, peeked.size());
+    assertThat(peeked.size()).isEqualTo(4);
   }
 
   @Test
@@ -155,11 +154,11 @@
         QUEUE_REGION, metaRegionFactory);
 
     List<AsyncEvent<?, ?>> peeked = queue.peek(3, 100);
-    assertEquals(3, peeked.size());
+    assertThat(peeked.size()).isEqualTo(3);
     List<AsyncEvent<?, ?>> peekedAfter = queue.peek(3, 100);
-    assertEquals(3, peekedAfter.size());
+    assertThat(peekedAfter.size()).isEqualTo(3);
     peekedAfter = queue.peek(1, 100);
-    assertEquals(1, peekedAfter.size());
+    assertThat(peekedAfter.size()).isEqualTo(1);
   }
 
   @Test
@@ -192,7 +191,7 @@
             .when(queue).getElementsMatching(any(), any(), anyLong());
 
     List<AsyncEvent<?, ?>> peeked = queue.peek(-1, 1);
-    assertEquals(3, peeked.size());
+    assertThat(peeked.size()).isEqualTo(3);
   }
 
   @Test
@@ -216,24 +215,23 @@
         QUEUE_REGION, metaRegionFactory);
     queue.setGroupTransactionEvents(true);
     List<AsyncEvent<?, ?>> peeked = queue.peek(3, -1);
-    assertEquals(4, peeked.size());
+    assertThat(peeked.size()).isEqualTo(4);
     assertThat(queue.getLastPeekedId()).isEqualTo(2);
-    assertThat(queue.getExtraPeekedIds().contains(5L)).isTrue();
-
+    assertThat(queue.getExtraPeekedIds()).contains(5L);
 
     for (Object ignored : peeked) {
       queue.remove();
     }
-    assertThat(queue.getExtraPeekedIds().contains(5L)).isTrue();
+    assertThat(queue.getExtraPeekedIds()).contains(5L);
 
     peeked = queue.peek(3, -1);
-    assertEquals(3, peeked.size());
-    assertThat(queue.getExtraPeekedIds().contains(5L)).isTrue();
+    assertThat(peeked.size()).isEqualTo(3);
+    assertThat(queue.getExtraPeekedIds()).contains(5L);
 
     for (Object ignored : peeked) {
       queue.remove();
     }
-    assertThat(queue.getExtraPeekedIds().contains(5L)).isFalse();
+    assertThat(queue.getExtraPeekedIds()).doesNotContain(5L);
   }
 
   private GatewaySenderEventImpl createMockGatewaySenderEventImpl(int transactionId,
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/MemoryAllocatorJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/MemoryAllocatorJUnitTest.java
index 6de0312..b080b7d 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/MemoryAllocatorJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/MemoryAllocatorJUnitTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.offheap;
 
+import static org.apache.geode.internal.offheap.MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -72,9 +73,9 @@
       NullOutOfOffHeapMemoryListener listener = new NullOutOfOffHeapMemoryListener();
       NullOffHeapMemoryStats stats = new NullOffHeapMemoryStats();
       try {
-        MemoryAllocatorImpl.createForUnitTest(listener, stats, 10, 950, 100, size -> {
+        MemoryAllocatorImpl.create(listener, stats, 10, 950, 100, null, size -> {
           throw new OutOfMemoryError("expected");
-        });
+        }, null, () -> new DummyNonRealTimeStatsUpdater());
       } catch (OutOfMemoryError expected) {
       }
       assertTrue(listener.isClosed());
@@ -98,7 +99,8 @@
             }
           }
         };
-        MemoryAllocatorImpl.createForUnitTest(listener, stats, 10, 950, MAX_SLAB_SIZE, factory);
+        MemoryAllocatorImpl.create(listener, stats, 10, 950, MAX_SLAB_SIZE, null, factory, null,
+            () -> new DummyNonRealTimeStatsUpdater());
       } catch (OutOfMemoryError expected) {
       }
       assertTrue(listener.isClosed());
@@ -109,7 +111,8 @@
       NullOffHeapMemoryStats stats = new NullOffHeapMemoryStats();
       SlabFactory factory = SlabImpl::new;
       MemoryAllocator ma =
-          MemoryAllocatorImpl.createForUnitTest(listener, stats, 10, 950, 100, factory);
+          MemoryAllocatorImpl.create(listener, stats, 10, 950, 100, null, factory, null,
+              () -> new DummyNonRealTimeStatsUpdater());
       try {
         assertFalse(listener.isClosed());
         assertFalse(stats.isClosed());
@@ -135,7 +138,8 @@
         listener = new NullOutOfOffHeapMemoryListener();
         stats2 = new NullOffHeapMemoryStats();
         MemoryAllocator ma2 =
-            MemoryAllocatorImpl.createForUnitTest(listener, stats2, 10, 950, 100, factory);
+            MemoryAllocatorImpl.create(listener, stats2, 10, 950, 100, null, factory, null,
+                () -> new DummyNonRealTimeStatsUpdater());
         assertSame(ma, ma2);
         assertTrue(stats.isClosed());
         assertFalse(listener.isClosed());
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapHelperJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapHelperJUnitTest.java
index 7f5fcdc..8d649d0 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapHelperJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapHelperJUnitTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.offheap;
 
+import static org.apache.geode.internal.offheap.MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
@@ -46,7 +47,7 @@
     OffHeapMemoryStats stats = mock(OffHeapMemoryStats.class);
 
     ma = MemoryAllocatorImpl.create(ooohml, stats, 3, OffHeapStorage.MIN_SLAB_SIZE * 3,
-        OffHeapStorage.MIN_SLAB_SIZE);
+        OffHeapStorage.MIN_SLAB_SIZE, null, () -> new DummyNonRealTimeStatsUpdater());
   }
 
   /**
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstanceTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstanceTest.java
index d32cb8b..48f6e87 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstanceTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstanceTest.java
@@ -55,6 +55,7 @@
 import org.apache.geode.internal.cache.entries.DiskEntry;
 import org.apache.geode.internal.cache.entries.OffHeapRegionEntry;
 import org.apache.geode.internal.cache.entries.VersionedStatsDiskRegionEntryOffHeap;
+import org.apache.geode.internal.offheap.MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater;
 import org.apache.geode.internal.serialization.DSCODE;
 
 public class OffHeapRegionEntryHelperInstanceTest {
@@ -81,7 +82,7 @@
 
     memoryAllocator =
         MemoryAllocatorImpl.create(listener, stats, 1, OffHeapStorage.MIN_SLAB_SIZE,
-            OffHeapStorage.MIN_SLAB_SIZE);
+            OffHeapStorage.MIN_SLAB_SIZE, null, () -> new DummyNonRealTimeStatsUpdater());
 
     offHeapRegionEntryHelperInstance =
         spy(new OffHeapRegionEntryHelperInstance(ohAddress -> offHeapStoredObject,
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageJUnitTest.java
index f940bca..9f358c9 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageJUnitTest.java
@@ -30,6 +30,7 @@
 import org.apache.geode.distributed.internal.DistributionStats;
 import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.distributed.internal.InternalLocator;
+import org.apache.geode.internal.offheap.MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater;
 import org.apache.geode.internal.statistics.LocalStatisticsFactory;
 import org.apache.geode.util.internal.GeodeGlossary;
 
@@ -167,7 +168,8 @@
     StatisticsFactory localStatsFactory = new LocalStatisticsFactory(null);
     OutOfOffHeapMemoryListener ooohml = mock(OutOfOffHeapMemoryListener.class);
     MemoryAllocator ma =
-        OffHeapStorage.basicCreateOffHeapStorage(localStatsFactory, 1024 * 1024, ooohml);
+        OffHeapStorage.basicCreateOffHeapStorage(localStatsFactory, 1024 * 1024, ooohml, null,
+            () -> new DummyNonRealTimeStatsUpdater());
     try {
       OffHeapMemoryStats stats = ma.getStats();
       assertNotNull(stats.getStats());
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageNonRuntimeStatsJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageNonRuntimeStatsJUnitTest.java
index 2aecc7b..0dd651f 100755
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageNonRuntimeStatsJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStorageNonRuntimeStatsJUnitTest.java
@@ -30,7 +30,8 @@
     StatisticsFactory localStatsFactory = new LocalStatisticsFactory(null);
     OutOfOffHeapMemoryListener ooohml = mock(OutOfOffHeapMemoryListener.class);
     MemoryAllocator ma =
-        OffHeapStorage.basicCreateOffHeapStorage(localStatsFactory, 1024 * 1024, ooohml, 100);
+        OffHeapStorage.basicCreateOffHeapStorage(localStatsFactory, 1024 * 1024, ooohml, () -> 100,
+            null);
     try {
       OffHeapMemoryStats stats = ma.getStats();
 
diff --git a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStoredObjectJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStoredObjectJUnitTest.java
index 2801c6d..2fc6a65 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStoredObjectJUnitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/offheap/OffHeapStoredObjectJUnitTest.java
@@ -42,6 +42,7 @@
 import org.apache.geode.internal.cache.CachePerfStats;
 import org.apache.geode.internal.cache.EntryEventImpl;
 import org.apache.geode.internal.cache.RegionEntryContext;
+import org.apache.geode.internal.offheap.MemoryAllocatorImpl.DummyNonRealTimeStatsUpdater;
 import org.apache.geode.internal.offheap.MemoryBlock.State;
 import org.apache.geode.internal.serialization.DSCODE;
 import org.apache.geode.internal.serialization.KnownVersion;
@@ -74,7 +75,7 @@
     OffHeapMemoryStats stats = mock(OffHeapMemoryStats.class);
 
     ma = MemoryAllocatorImpl.create(ooohml, stats, 3, OffHeapStorage.MIN_SLAB_SIZE * 3,
-        OffHeapStorage.MIN_SLAB_SIZE);
+        OffHeapStorage.MIN_SLAB_SIZE, null, () -> new DummyNonRealTimeStatsUpdater());
   }
 
   @After
diff --git a/geode-core/src/test/java/org/apache/geode/internal/tcp/ConnectionTransmissionTest.java b/geode-core/src/test/java/org/apache/geode/internal/tcp/ConnectionTransmissionTest.java
index 5a041eb..906a021 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/tcp/ConnectionTransmissionTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/tcp/ConnectionTransmissionTest.java
@@ -168,7 +168,7 @@
     senderAddr.setDirectChannelPort(conduit.getPort());
 
     return spy(Connection.createSender(membership, writerTable, true, remoteAddr, true,
-        System.currentTimeMillis(), 1000, 1000));
+        System.currentTimeMillis(), 1000, 1000, false));
   }
 
   private Connection createReceiverConnectionOnFirstAccept(final ServerSocketChannel acceptorSocket,
diff --git a/geode-core/src/test/java/org/apache/geode/internal/tcp/TCPConduitTest.java b/geode-core/src/test/java/org/apache/geode/internal/tcp/TCPConduitTest.java
index 392a599..e1b3ddf 100644
--- a/geode-core/src/test/java/org/apache/geode/internal/tcp/TCPConduitTest.java
+++ b/geode-core/src/test/java/org/apache/geode/internal/tcp/TCPConduitTest.java
@@ -94,7 +94,8 @@
             TCPConduit -> connectionTable, socketCreator, doNothing(), false);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     doThrow(new IOException("Cannot form connection to alert listener"))
-        .when(connectionTable).get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(eq(member)))
         .thenReturn(true);
     when(membership.isShunned(same(member)))
@@ -102,7 +103,7 @@
 
     AlertingAction.execute(() -> {
       Throwable thrown = catchThrowable(() -> {
-        tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+        tcpConduit.getConnection(member, false, 0L, 0L, 0L);
       });
 
       assertThat(thrown)
@@ -123,13 +124,14 @@
     doThrow(new IOException("Cannot form connection to alert listener"))
         // getConnection will loop indefinitely until connectionTable returns connection
         .doReturn(connection)
-        .when(connectionTable).get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(eq(member)))
         .thenReturn(true);
     when(membership.isShunned(same(member)))
         .thenReturn(false);
 
-    Connection value = tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+    Connection value = tcpConduit.getConnection(member, false, 0L, 0L, 0L);
 
     assertThat(value)
         .isSameAs(connection);
@@ -143,12 +145,13 @@
             TCPConduit -> connectionTable, socketCreator, doNothing(), false);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     doThrow(new IOException("Cannot form connection to alert listener"))
-        .when(connectionTable).get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(eq(member)))
         .thenReturn(false);
 
     Throwable thrown = catchThrowable(() -> {
-      tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+      tcpConduit.getConnection(member, false, 0L, 0L, 0L);
     });
 
     assertThat(thrown)
@@ -164,14 +167,15 @@
             TCPConduit -> connectionTable, socketCreator, doNothing(), false);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     doThrow(new IOException("Cannot form connection to alert listener"))
-        .when(connectionTable).get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(same(member)))
         .thenReturn(true);
     when(membership.isShunned(same(member)))
         .thenReturn(true);
 
     Throwable thrown = catchThrowable(() -> {
-      tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+      tcpConduit.getConnection(member, false, 0L, 0L, 0L);
     });
 
     assertThat(thrown)
@@ -188,7 +192,8 @@
             TCPConduit -> connectionTable, socketCreator, doNothing(), false);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     doThrow(new IOException("Cannot form connection to alert listener"))
-        .when(connectionTable).get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(same(member)))
         .thenReturn(true);
     when(membership.isShunned(same(member)))
@@ -197,7 +202,7 @@
         .thenReturn(true);
 
     Throwable thrown = catchThrowable(() -> {
-      tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+      tcpConduit.getConnection(member, false, 0L, 0L, 0L);
     });
 
     assertThat(thrown)
@@ -214,7 +219,8 @@
             TCPConduit -> connectionTable, socketCreator, doNothing(), false);
     InternalDistributedMember member = mock(InternalDistributedMember.class);
     doThrow(new IOException("Cannot form connection to alert listener"))
-        .when(connectionTable).get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong());
+        .when(connectionTable)
+        .get(same(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
     when(membership.memberExists(same(member)))
         .thenReturn(true);
     when(membership.isShunned(same(member)))
@@ -223,7 +229,7 @@
         .thenReturn(true);
 
     Throwable thrown = catchThrowable(() -> {
-      tcpConduit.getConnection(member, false, false, 0L, 0L, 0L);
+      tcpConduit.getConnection(member, false, 0L, 0L, 0L);
     });
 
     assertThat(thrown)
@@ -231,6 +237,73 @@
         .hasMessage("Abandoned because shutdown is in progress");
   }
 
+  @Test
+  public void getFirstScanForConnectionThrowsAlertingIOException_ifCaughtIOException_whileAlerting()
+      throws Exception {
+    TCPConduit tcpConduit =
+        new TCPConduit(membership, 0, localHost, false, directChannel, mock(BufferPool.class),
+            new Properties(),
+            TCPConduit -> connectionTable, socketCreator, doNothing(), false);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    doThrow(new IOException("Cannot form connection to alert listener"))
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
+
+    AlertingAction.execute(() -> {
+      Throwable thrown = catchThrowable(() -> {
+        tcpConduit.getFirstScanForConnection(member, false, 0L, 0L, 0L);
+      });
+
+      assertThat(thrown)
+          .isInstanceOf(AlertingIOException.class);
+    });
+  }
+
+  @Test
+  public void getFirstScanForConnectionRethrows_ifCaughtIOException_whileNotAlerting()
+      throws Exception {
+    TCPConduit tcpConduit =
+        new TCPConduit(membership, 0, localHost, false, directChannel, mock(BufferPool.class),
+            new Properties(),
+            TCPConduit -> connectionTable, socketCreator, doNothing(), false);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    Connection connection = mock(Connection.class);
+    doThrow(new IOException("Connection not created in first try"))
+        .doReturn(connection)
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
+
+    Throwable thrown = catchThrowable(() -> {
+      tcpConduit.getFirstScanForConnection(member, false, 0L, 0L, 0L);
+    });
+
+    assertThat(thrown)
+        .isInstanceOf(IOException.class);
+  }
+
+  @Test
+  public void getFirstScanForConnectionRethrows_ifCaughtIOException_withoutMessage()
+      throws Exception {
+    TCPConduit tcpConduit =
+        new TCPConduit(membership, 0, localHost, false, directChannel, mock(BufferPool.class),
+            new Properties(),
+            TCPConduit -> connectionTable, socketCreator, doNothing(), false);
+    InternalDistributedMember member = mock(InternalDistributedMember.class);
+    Connection connection = mock(Connection.class);
+    doThrow(new IOException())
+        .doReturn(connection)
+        .when(connectionTable)
+        .get(eq(member), anyBoolean(), anyLong(), anyLong(), anyLong(), anyBoolean());
+
+    Throwable thrown = catchThrowable(() -> {
+      tcpConduit.getFirstScanForConnection(member, false, 0L, 0L, 0L);
+    });
+
+    assertThat(thrown)
+        .isInstanceOf(IOException.class);
+  }
+
   private Runnable doNothing() {
     return () -> {
       // nothing
diff --git a/geode-docs/developing/events/conflate_multisite_gateway_queue.html.md.erb b/geode-docs/developing/events/conflate_multisite_gateway_queue.html.md.erb
index 5fa8df9..4d2ed5f 100644
--- a/geode-docs/developing/events/conflate_multisite_gateway_queue.html.md.erb
+++ b/geode-docs/developing/events/conflate_multisite_gateway_queue.html.md.erb
@@ -25,9 +25,9 @@
 **Note:**
 Do not use conflation if your receiving applications depend on the specific ordering of entry modifications, or if they need to be notified of every change to an entry.
 
-Conflation is most useful when a single entry is updated frequently, but other sites only need to know the current value of the entry (rather than the value of each update). When an update is added to a queue that has conflation enabled, if there is already an update message in the queue for the entry key, then the existing message assumes the value of the new update and the new update is dropped, as shown here for key A.
+Conflation is most useful when a single entry is updated frequently, but other sites only need to know the current value of the entry (rather than the value of each update). When an update is added to a queue that has conflation enabled, if there is already an update message in the queue for the entry key, then the existing message is removed and the new update is added to the end of the queue, as shown here for key A.
 
-<img src="../../images/MultiSite-4.gif" id="conflate_multisite_gateway_queue__image_27219DAAB6D643348641389DBAEA1E94" class="image" />
+<img src="../../images_svg/MultiSite-4.svg" id="conflate_multisite_gateway_queue__image_27219DAAB6D643348641389DBAEA1E94" class="image" />
 
 **Note:**
 This method of conflation is different from the one used for server-to-client subscription queue conflation and peer-to-peer distribution within a cluster.
diff --git a/geode-docs/getting_started/system_requirements/host_machine.html.md.erb b/geode-docs/getting_started/system_requirements/host_machine.html.md.erb
index 9ce558d..51d4e86 100644
--- a/geode-docs/getting_started/system_requirements/host_machine.html.md.erb
+++ b/geode-docs/getting_started/system_requirements/host_machine.html.md.erb
@@ -24,7 +24,7 @@
 <a id="system_requirements__section_1E1F206FBC8B4A898A449E0699907A7A"></a>
 Each machine that will run <%=vars.product_name_long%> must meet the following requirements:
 
--   Java SE Development Kit <%=vars.min_java_version%> with update <%=vars.min_java_update%> or a more recent version <%=vars.min_java_version%> update.  The same versions are supported with OpenJDK.
+-   Java SE Development Kit <%=vars.min_java_version%> with update <%=vars.min_java_update%> or a more recent version <%=vars.min_java_version%> update.  The same versions are supported with OpenJDK. See [Java Support](java_support.html) for details.
 -   A system clock set to the correct time and a time synchronization service such as Network Time Protocol (NTP). Correct time stamps permit the following activities:
     -   Logs that are useful for troubleshooting. Synchronized time stamps ensure that log messages from different hosts can be merged to reproduce an accurate chronological history of a distributed run.
     -   Aggregate product-level and application-level time statistics. 
@@ -49,4 +49,4 @@
         sysctl -p
         ```
 
-    See [Disabling TCP SYN Cookies](../../managing/monitor_tune/disabling_tcp_syn_cookies.html) for details.
+    See [Disable TCP SYN Cookies](../../managing/monitor_tune/disabling_tcp_syn_cookies.html) for details.
diff --git a/geode-docs/getting_started/system_requirements/java_support.html.md.erb b/geode-docs/getting_started/system_requirements/java_support.html.md.erb
new file mode 100644
index 0000000..4bb44a4
--- /dev/null
+++ b/geode-docs/getting_started/system_requirements/java_support.html.md.erb
@@ -0,0 +1,151 @@
+---
+title:  Java Support
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+This version of <%=vars.product_name%> requires Java 8 release <%=vars.min_java_update %> or
+a more recent version 8 update.  The same versions are supported with OpenJDK (HotSpot).
+
+<%=vars.product_name%> is also compatible with Open JDK 11 and JDK 17.
+
+The <%=vars.product_name%> product download does not include Java.
+Download and install a supported JRE or JDK on each system running <%=vars.product_name%>.
+VMware recommends the installation of a full JDK (and not just a JRE)
+to obtain better performance with `gfsh status` and `gfsh stop` commands.
+
+The IBM SDK, Java Technology Edition, Version 8 is supported for application clients only. Some
+client region eviction configurations such as `HEAP_LRU` are known to not function properly with
+this SDK.
+
+## <a id="java-modules-and-geode" class="no-quick-link"></a>Java Modules and <%=vars.product_name%>
+
+### <a id="how-jdk-encapsulation-affects-geode" class="no-quick-link"></a>How JDK 17 Encapsulation Affects <%=vars.product_name%>
+
+Beginning with Java 17, the JVM strongly enforces the encapsulation policies introduced in Java 9 by the Java Platform Module System.
+
+Several <%=vars.product_name%> features, such as serialization and eviction, use deep reflection to inspect the Java objects used as keys and values. In Java 17, this deep reflection requires that the object's type be made explicitly available for reflection.
+
+By default, each type declared in a named module is available for reflection only if the declaring module opens the type's package.
+
+As a result, <%=vars.product_name%>, by default, cannot inspect the following encapsulated types and fields:
+- The private and protected types declared in named modules.
+- The private and protected fields of types declared in named modules.
+
+In Java 17, every type defined by the JDK is declared in a named module, and none are opened for reflection by any code outside of the JDK. Therefore, any encapsulated type or field defined by the JDK is, by default, unavailable for reflection.
+
+See [Access Required by <%=vars.product_name%>](#access-required-by-geode) for ways to identify what access <%=vars.product_name%> needs, and [Granting Access to Encapsulated Types](#granting-access-to-encapsulated-types) for ways to grant that access.
+
+### <a id="access-required-by-geode" class="no-quick-link"></a>Access Required by <%=vars.product_name%>
+
+<%=vars.product_name%> may require reflective access to the types used in your application data. 
+
+If <%=vars.product_name%> does not have the access it requires, it throws an exception similar to the following:
+
+```
+java.lang.reflect.InaccessibleObjectException: Unable to make field private final
+java.math.BigInteger java.math.BigDecimal.intVal accessible: module java.base does not 
+"opens java.math" to unnamed module @23a5fd2
+```
+
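+The following standalone probe (a hypothetical class, not part of <%=vars.product_name%>) reproduces the exception above on Java 17; launching the JVM with `--add-opens=java.base/java.math=ALL-UNNAMED` makes the field accessible:
+
+```
+import java.lang.reflect.Field;
+
+public class ReflectionProbe {
+  public static void main(String[] args) throws Exception {
+    Field intVal = java.math.BigDecimal.class.getDeclaredField("intVal");
+    // Throws InaccessibleObjectException unless java.math was opened
+    // to the unnamed module at JVM launch.
+    intVal.setAccessible(true);
+    System.out.println("java.math is open for deep reflection");
+  }
+}
+```
+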
+#### Access to the Types Used in Application Data
+
+Depending on the <%=vars.product_name%> features that you use, <%=vars.product_name%> may require reflective access to the types used in your application data. This includes:
+
+- The types that define your application data.
+- The types to which your application data holds a reference, whether directly or indirectly.
+
+If any object of your application data is represented by a type declared in a named module, or holds a reference (directly or indirectly) to a type declared in a named module, <%=vars.product_name%> may require explicit access to that type.
+
+> **Note:** Every type defined by the JDK is declared in a named module. Therefore, if any object of your application data is represented by a type defined by the JDK, or holds a reference (directly or indirectly) to a type defined by the JDK, <%=vars.product_name%> may require explicit access to that type.
+
+<%=vars.product_name%> can automatically access types declared in packages loaded from the classpath.
+
+#### Additional Access Required by All <%=vars.product_name%> Processes
+
+<%=vars.product_name%> always requires access to certain JDK packages, regardless of the types of objects used by your application. The following options must always be included when launching locators and servers, and when launching clients and other applications that depend on <%=vars.product_name%>:
+
+```
+--add-exports=java.base/sun.nio.ch=ALL-UNNAMED
+--add-exports=java.management/com.sun.jmx.remote.security=ALL-UNNAMED
+--add-opens=java.base/java.lang=ALL-UNNAMED
+--add-opens=java.base/java.nio=ALL-UNNAMED
+--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED
+```
+
+> **Note:** `gfsh` automatically supplies these options whenever it launches a locator or server process.
+
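+For a standalone client JVM, these options go directly on the `java` command line. A hypothetical launch (the jar and main class names are placeholders):
+
+```
+java --add-exports=java.base/sun.nio.ch=ALL-UNNAMED \
+     --add-exports=java.management/com.sun.jmx.remote.security=ALL-UNNAMED \
+     --add-opens=java.base/java.lang=ALL-UNNAMED \
+     --add-opens=java.base/java.nio=ALL-UNNAMED \
+     --add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED \
+     -cp my-app.jar com.example.MyClientApp
+```
+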
+### <a id="granting-access-to-encapsulated-types" class="no-quick-link"></a>Granting Access to Encapsulated Types
+
+#### Opening Specific Packages
+
+To give <%=vars.product_name%> access to the types in a specific package, use the `--add-opens` command line option when launching a locator, server, client, or other application that uses <%=vars.product_name%>. For example:
+
+```
+--add-opens=java.base/java.math=ALL-UNNAMED
+```
+
+This example allows <%=vars.product_name%> to inspect `BigDecimal` and other types declared in the `java.math` package in the `java.base` module.
+
+Because the JDK loads <%=vars.product_name%> code into an unnamed module, the `--add-opens` option must open the package to `ALL-UNNAMED`.
+
+#### Using an Argument File
+
+Starting with Java 9, the `java` command accepts argument files as an option. The `java` command treats the content of the argument file as arguments for the JVM.
+
+If you must use numerous JVM options to give <%=vars.product_name%> the access it needs, you can combine the arguments into an argument file. For example:
+
+```
+--add-opens=java.base/java.text=ALL-UNNAMED
+--add-opens=java.base/java.time=ALL-UNNAMED
+--add-opens=java.base/java.time.chrono=ALL-UNNAMED
+--add-opens=java.base/java.time.format=ALL-UNNAMED
+--add-opens=java.base/java.time.temporal=ALL-UNNAMED
+--add-opens=java.base/java.time.zone=ALL-UNNAMED
+```
+
+To use an argument file, add the `@` prefix to identify your argument file to the `java` command:
+
+```
+java @path/to/my-argument-file ...
+```
+
+#### Opening All JDK Packages
+
+The <%=vars.product_name%> distribution includes an argument file that opens every package in the Linux version of OpenJDK 17: `path_to_product/config/open-all-jdk-packages-linux-openjdk-17`.
+
+Use the JDK 17 argument file as follows:
+
+-   When launching <%=vars.product_name%> clients and other applications with <%=vars.product_name%> functionality:
+    
+    ```
+    java @path_to_product/config/open-all-jdk-packages-linux-openjdk-17 ...
+    ```
+    
+-   When launching <%=vars.product_name%> members with `gfsh`, add the argument file as a `--J` option:
+    
+    ```
+    start locator --J=@path_to_product/config/open-all-jdk-packages-linux-openjdk-17 ...
+    start server --J=@path_to_product/config/open-all-jdk-packages-linux-openjdk-17 ...
+    ```
+    
+If you use a different JDK, copy and edit this file to add or remove packages to match your JDK.
+
+### <a id="java-17-and-garbage-collection" class="no-quick-link"></a>Java 17 and Garbage Collection
+
+Java 17 does not include the CMS garbage collector. The default garbage collector is now G1. See [Managing Heap Memory](../../managing/heap_use/heap_management.html) for details about configuring <%=vars.product_name%>'s use of the garbage collector.
diff --git a/geode-docs/getting_started/upgrade/upgrade_overview.html.md.erb b/geode-docs/getting_started/upgrade/upgrade_overview.html.md.erb
index cfecad3..899da9b 100644
--- a/geode-docs/getting_started/upgrade/upgrade_overview.html.md.erb
+++ b/geode-docs/getting_started/upgrade/upgrade_overview.html.md.erb
@@ -54,3 +54,53 @@
 
 -   **[Upgrading Clients](upgrade_clients.html)**
 
+    When you upgrade your <%=vars.product_name%> server software, you may need to update your client
+    applications in order to maintain compatibility with the upgraded servers.
+
+## <a id="upgrade_to_115" class="no-quick-link"></a>Upgrading to v1.15
+
+For some users, issues regarding SSL protocols and their default values require a preparatory SSL protocol migration step when upgrading to <%=vars.product_name%> v1.15.
+Please read the following section carefully to determine whether your system requires this additional SSL protocol migration step.
+
+### <a id="is_ssl_protocol_migration_required" class="no-quick-link"></a>Does my System Require SSL Protocol Migration Before Upgrading to <%=vars.product_name%> v1.15?
+
+To determine whether your system requires the SSL protocol migration preparatory step, see if your system meets both of the following conditions:
+
+- `ssl-endpoint-identification-enabled` is set to `true`, AND<br/>
+- `ssl-protocols` is set to a value other than "any" (that is, it specifies a list of specific protocols) and the list does not include "SSLv2Hello".
+
+If both conditions hold, your system requires the SSL protocol migration step.
+
+**How do I determine my system's settings for the `ssl-endpoint-identification-enabled` and `ssl-protocols` properties?**
+
+SSL properties may be set in properties files or on the gfsh command line. To determine the settings for these parameters,
+
+- Check `gemfire.properties` and `gfsecurity.properties` for
+`ssl-endpoint-identification-enabled=true`. Also look for `ssl-use-default-context=true`, which sets
+`ssl-endpoint-identification-enabled=true`.
+
+- Search system logs for these properties (using `grep`, for example, as sketched below).
+
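+A minimal check of the property files, assuming the default file names in the current directory:
+
+```
+grep -E 'ssl-(endpoint-identification-enabled|use-default-context|protocols)' \
+    gemfire.properties gfsecurity.properties
+```
+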
+## <a id="preparatory-migration" class="no-quick-link"></a>Preparatory SSL Protocol Migration
+
+The preparatory SSL protocol migration process consists of replacing one property, `ssl-protocols`,
+with two new properties, `ssl-client-protocols` and `ssl-server-protocols`, then removing the old
+`ssl-protocols` definition. Perform this substitution in whatever way the original `ssl-protocols`
+were defined: in `.properties` files or on a command line.
+
+1. If your system is running JDK 8, upgrade to the latest version of JDK 8 before proceeding. This is necessary even if you plan to
+perform the optional JDK upgrade step to JDK 11 or JDK 17.
+2. Shut down a member (server or locator).
+3. Install <%=vars.product_name%> 1.15.
+4. Optionally install a new JDK.
+5. Add security property `ssl-client-protocols` with the same definition as the old `ssl-protocols` property.
+6. Add security property `ssl-server-protocols` with the same definition as the old `ssl-protocols` property plus "SSLv2Hello" (see the properties sketch after this list).
+For example, if the original value of `ssl-protocols` is "TLSv1.2", then define
+    - `ssl-client-protocols="TLSv1.2"`
+    - `ssl-server-protocols="TLSv1.2,SSLv2Hello"`
+7. Start the member.
+8. Verify successful cluster join.
+9. Repeat from step 2 for the next member.
+
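+A minimal before/after sketch of this substitution in a properties file; the protocol values shown are examples only:
+
+```
+# Before migration
+ssl-endpoint-identification-enabled=true
+ssl-protocols=TLSv1.2
+
+# After migration: ssl-protocols replaced by the two new properties
+ssl-endpoint-identification-enabled=true
+ssl-client-protocols=TLSv1.2
+ssl-server-protocols=TLSv1.2,SSLv2Hello
+```
+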
+After your upgrade is complete, you may optionally restore your original `ssl-protocols` property
+and restart all members to eliminate `SSLv2Hello` protocol support.
diff --git a/geode-docs/images/MultiSite-4.gif b/geode-docs/images/MultiSite-4.gif
deleted file mode 100644
index c4a4b7d..0000000
--- a/geode-docs/images/MultiSite-4.gif
+++ /dev/null
Binary files differ
diff --git a/geode-docs/images_svg/MultiSite-4.svg b/geode-docs/images_svg/MultiSite-4.svg
new file mode 100644
index 0000000..e58a4e6
--- /dev/null
+++ b/geode-docs/images_svg/MultiSite-4.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="624px" height="391px" viewBox="-0.5 -0.5 624 391" content="&lt;mxfile host=&quot;Electron&quot; modified=&quot;2022-06-01T08:05:30.280Z&quot; agent=&quot;5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/17.2.4 Chrome/96.0.4664.174 Electron/16.1.0 Safari/537.36&quot; etag=&quot;owQXNkmOwgtk-nMk95M2&quot; version=&quot;17.2.4&quot; type=&quot;device&quot;&gt;&lt;diagram id=&quot;C5RBs43oDa-KdzZeNtuy&quot; name=&quot;Page-1&quot;&gt;7VrZdtowEP0aHttjyQvmkSVN06Q5SZMm7aOwBagxFpHFlq+vZMuApTSAG7bAC1hjeSzNnbmeGbtiN/uTc4YGve80xFEFWuGkYrcqEAJgQfEnJVMlsaCbSbqMhEo2F9yRF5xPVNIhCXFSmMgpjTgZFIUBjWMc8IIMMUbHxWkdGhXvOkBdbAjuAhSZ0kcS8l4m9WF1Lv+KSbeX3xl4texMH+WT1U6SHgrpeEFkn1XsJqOUZ0f9SRNH0nq5XR4vpo/R1ZN3/u02eUY/G5f31w+fMmVf1rlktgWGY15aNfPGjfbN1XXz5aJ68+e6c9vET+oSa4SiobKXuAWbVqAXiTs12kwcdeVRwDDi2JRfYjHZaigL8Wlu9nGPcHw3QIEcj4Vvibk93o/ECIhDlAwysDtkgsUClYIRZhxPNOCW7BrMoBBOjGkfp+u3lBYbKKCV//pqx+MFX1Ci3oIb5DKkvK87Uzy3sDhQRl7D4GBVgw8H4RsGr5snHlKV0Pqxt1g4trNXWEADC8N0OA7rkoTEKIhQkpCgaDtGh3EojdayxAhPCP+lzsjj31L+2VWj1mRhWmuqBtktcWiw2CqmFmulQxbgZRFuQrJgc/cVm+cyhiPEyai4tteAUHe4oUSseh59tWL0AVfDMlu+umqRvDRFDtQUQU0RR6yLuaEo9YvZtsu7inPwrrIrF3CBrSEHyrmA6y1RtGEXcA0XuKepefpU8rSVEPmnOYVgUK6xbUS6sfQRgRlmQiB5loh8pa5O9EkYyssbDCfkBbVTVRL+gdxZule3UXFbUteQ0yTLuKTqhDP6hJs0okJvK6ax1NIhUaSJNsXvbtUqIgRMgq+94ml6LL8bwXsGZBcx4QTJCyMapP+9YVv8Pg9x+vAcE7HQNBntSK+n8XEjCqxizDk1A9BZor8VRKtr8bCyzj9J+CAJ1dGCrForx6feMkUb5lPfgPKjlR6waGCYPwl3le/WVrX4h6w9qvsFBjBL77eobB9TyhWqD3+3TKkFoG+Xo0rH1hS5mqINUyUwuwaH5iu7Kz88DTqnnA+43hJFm/YBs1txqj/0+qOYrYq4NTh+q/VH/ow56nS1iIkNS3Kwt0zRpuPPbAG9V/Y0UtnTreEc+5I9AVeLrJyNdpY9md2YehjK6MgwsXIQLE7n5f0xk6NeygPfhBB4W2VHsx45ZTWrkSq0tXh0YTlWhd4SRRtmVWhWQcfDqnqDwHbN9tp2X4it/HbyUHsyehtg9yY30/pDI8EV2gAL30vsQR/AgSVbpnofwK5tt2UK16sn9tFZ9qUPMPOJ/+0DGIo27QNmHXLqA7zdB7B9k+S32geA5ovIW/XCEXWk9U9vHGdlCvCL2EHfwA7Y7wOeGM6/3svic/4RpH32Fw==&lt;/diagram&gt;&lt;/mxfile&gt;" style="background-color: rgb(255, 255, 255);"><defs/><g><rect x="277" y="31" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 71px; margin-left: 278px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />create<br />Key B</div></div></div></foreignObject><text x="317" y="75" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><rect x="394" y="31" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 71px; 
margin-left: 395px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />update<br />Key A<br />Value R</div></div></div></foreignObject><text x="434" y="75" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><path d="M 357 71 L 380.63 71" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 385.88 71 L 378.88 74.5 L 380.63 71 L 378.88 67.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 473 72 L 516.63 72" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 521.88 72 L 514.88 75.5 L 516.63 72 L 514.88 68.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="530" y="61" width="90" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 71px; margin-left: 575px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">To remote site</div></div></div></foreignObject><text x="575" y="75" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">To remote site</text></switch></g><rect x="63" y="0" width="210" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 10px; margin-left: 168px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">Initial local hub queue with conflation</div></div></div></foreignObject><text x="168" y="14" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">Initial local hub queue with confla...</text></switch></g><path d="M 0 130 L 600 130" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><rect x="280" y="164" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe 
center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 204px; margin-left: 281px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />create<br />Key B</div></div></div></foreignObject><text x="320" y="208" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><rect x="397" y="164" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 204px; margin-left: 398px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />update<br />Key A<br />Value R</div></div></div></foreignObject><text x="437" y="208" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><path d="M 360 204 L 383.63 204" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 388.88 204 L 381.88 207.5 L 383.63 204 L 381.88 200.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 476 205 L 519.63 205" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 524.88 205 L 517.88 208.5 L 519.63 205 L 517.88 201.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="533" y="194" width="90" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 204px; margin-left: 578px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">To remote site</div></div></div></foreignObject><text x="578" y="208" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">To remote site</text></switch></g><path d="M 3 274 L 603 274" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><rect x="113" y="163" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: 
visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 203px; margin-left: 114px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />update<br />Key A<br />value Q</div></div></div></foreignObject><text x="153" y="207" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><rect x="63" y="133" width="160" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 143px; margin-left: 143px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">Add entry update to queue</div></div></div></foreignObject><text x="143" y="147" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">Add entry update to queue</text></switch></g><path d="M 193 203 L 216.63 203" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 221.88 203 L 214.88 206.5 L 216.63 203 L 214.88 199.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="280" y="310" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 350px; margin-left: 281px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />update<br />Key A<br />value Q</div></div></div></foreignObject><text x="320" y="354" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><rect x="397" y="310" width="80" height="80" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 350px; margin-left: 398px;"><div 
data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">entry<br />create<br />Key B</div></div></div></foreignObject><text x="437" y="354" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">entry...</text></switch></g><path d="M 360 350 L 383.63 350" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 388.88 350 L 381.88 353.5 L 383.63 350 L 381.88 346.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 476 351 L 519.63 351" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 524.88 351 L 517.88 354.5 L 519.63 351 L 517.88 347.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="533" y="340" width="90" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 350px; margin-left: 578px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">To remote site</div></div></div></foreignObject><text x="578" y="354" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">To remote site</text></switch></g><rect x="78" y="279" width="130" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 289px; margin-left: 143px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">Queue after conflation</div></div></div></foreignObject><text x="143" y="293" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">Queue after conflation</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg>
\ No newline at end of file
diff --git a/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb b/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
index f0bfd8b..d15b185 100644
--- a/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
+++ b/geode-docs/managing/disk_storage/optimize_availability_and_performance.html.md.erb
@@ -33,5 +33,7 @@
     ```
     start server --name=server1 --J=-Dgeode.parallelDiskStoreRecovery=false
     ```
-
+**Note:**
+When using parallel disk store recovery, configure separate disk stores for PDX and for the region,
+as sketched below. Otherwise, recovery runs in sequential mode, regardless of the above flag.
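+
+A hypothetical gfsh sketch of this separation (store names and directories are placeholders):
+
+```
+gfsh> create disk-store --name=pdx-store --dir=/data/pdx
+gfsh> create disk-store --name=region-store --dir=/data/region
+gfsh> configure pdx --read-serialized=true --disk-store=pdx-store
+gfsh> create region --name=example --type=PARTITION_PERSISTENT --disk-store=region-store
+```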
 
diff --git a/geode-docs/managing/heap_use/heap_management.html.md.erb b/geode-docs/managing/heap_use/heap_management.html.md.erb
index 682d711..b138716 100644
--- a/geode-docs/managing/heap_use/heap_management.html.md.erb
+++ b/geode-docs/managing/heap_use/heap_management.html.md.erb
@@ -153,7 +153,6 @@
 Although the garbage first (G1) garbage collector works effectively with <%=vars.product_name_long%>, issues can arise in some cases due to the differences between CMS and G1.
 For example, G1 by design is not able to set a maximum tenured heap size, so when this value is requested from the garbage collector, it reports the total heap maximum size. This impacts <%=vars.product_name_long%>, as the resource manager uses the maximum size of the tenured heap size to calculate the value in bytes of the eviction and critical percentages.
 Extensive testing is recommended before using G1 garbage collector. See your JVM documentation for all JVM-specific settings that can be used to improve garbage collection (GC) response.
-If you find the <%=vars.product_name_long%> Resource Manager does not detect crossing the eviction or critical threshold quickly enough, it has been observed that its responsiveness is increased by reducing the default value of `--J-XX:MaxGCPauseMillis=VALUE` JVM parameter (which is `200`). Be sure to take into account that this change also increases the amount of time spent in garbage collection.
 
 Size of objects stored on a region must also be taken into account. If the primary heap objects you allocate are larger than 50 percent of the G1 region size (what are called "humongous" objects), this can cause the JVM to report `out of heap memory` when it has used only 50 percent of the heap.
 The default G1 region size is 1 MB; it can be increased up to 32 MB (with values that are always a power of 2) by using the `--J=-XX:G1HeapRegionSize=VALUE` JVM parameter. If you are using large objects and want to use G1GC without increasing its heap region size (or if your values are larger than 16 MB), then you could configure your <%=vars.product_name_long%> regions to store the large values off-heap. However, even if you do that, the large off-heap values will allocate large temporary heap values that G1GC will treat as "humongous" allocations, even though they will be short lived. Consider using CMS if most of your values will result in "humongous" allocations.
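
For instance, a sketch of raising the G1 region size at server startup (the value is illustrative, not a recommendation):

```
start server --name=server1 --J=-XX:G1HeapRegionSize=8m
```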
diff --git a/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb b/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
index af2c485..9e63ead 100644
--- a/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
+++ b/geode-docs/managing/monitor_tune/chapter_overview.html.md.erb
@@ -21,7 +21,7 @@
 
 A collection of tools and controls allow you to monitor and adjust <%=vars.product_name_long%> performance.
 
--   **[Disabling TCP SYN Cookies](disabling_tcp_syn_cookies.html)**
+-   **[Disable TCP SYN Cookies](disabling_tcp_syn_cookies.html)**
 
     This is a must-do for Linux systems.
 
diff --git a/geode-docs/managing/security/implementing_authentication.html.md.erb b/geode-docs/managing/security/implementing_authentication.html.md.erb
index a79e5d1..972ed9d 100644
--- a/geode-docs/managing/security/implementing_authentication.html.md.erb
+++ b/geode-docs/managing/security/implementing_authentication.html.md.erb
@@ -36,6 +36,7 @@
 
 In case of an `AuthenticationExpiredException` the <%=vars.product_name%> client code will make one automatic attempt
 to re-connect to the member that sent the exception.
+A `SecurityManager` implementation that supports reauthentication using expiring credentials must also support non-expiring credentials for cluster members.
 
 A well-designed `authenticate` method will have a set of known credentials, such as user and password pairs, that can be
 compared to the credentials presented or will have a way of obtaining those credentials.
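
As a minimal sketch of such an implementation (the in-memory user table and the `security-username`/`security-password` property names are illustrative assumptions):

```
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.geode.security.AuthenticationFailedException;
import org.apache.geode.security.SecurityManager;

public class SampleSecurityManager implements SecurityManager {
  // Known credentials; a production implementation might consult LDAP or a token service.
  private final Map<String, String> users = new ConcurrentHashMap<>();

  public SampleSecurityManager() {
    users.put("data-reader", "secret");
  }

  @Override
  public Object authenticate(Properties credentials) throws AuthenticationFailedException {
    String user = credentials.getProperty("security-username");
    String password = credentials.getProperty("security-password");
    if (user == null || password == null || !password.equals(users.get(user))) {
      throw new AuthenticationFailedException("Unknown user or bad password");
    }
    return user; // becomes the principal passed to authorize()
  }
}
```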
diff --git a/geode-docs/managing/security/implementing_authentication_expiry.html.md.erb b/geode-docs/managing/security/implementing_authentication_expiry.html.md.erb
index 25f59e9..d6f5131 100644
--- a/geode-docs/managing/security/implementing_authentication_expiry.html.md.erb
+++ b/geode-docs/managing/security/implementing_authentication_expiry.html.md.erb
@@ -19,15 +19,17 @@
 limitations under the License.
 -->
 
+Authentication expiry is supported only with client connections.
+Expirable credentials are most commonly used in combination with token-based authentication and authorization.
 Authentication expiry makes it possible for cluster administrators to limit the life span of client
-and peer connections within the cluster. The use of expirable credentials is most common when used in
-combination with token based authentication and authorization.
+connections within the cluster.
 
-Client connections are notified of expiry through the throwing of an `AuthenticationExpiredException`
+Client connections are notified of expiry by `AuthenticationExpiredException`,
 which is thrown in the implementations of `SecurityManager.authenticate` or `SecurityManager.authorize`.
 
-Clients will do one automatic attempt to reconnect. Upon receiving a second `AuthenticationExpiredException`
-the exception will be propagated up the chain for the user to handle. There are some differences in
+Upon receiving an `AuthenticationExpiredException`, clients make one automatic attempt to gather new credentials (via `AuthInitialize.getCredentials()`) and reconnect.
+Upon receiving a second `AuthenticationExpiredException`,
+the exception is thrown back to the user to handle. There are some differences in
 behavior between older and newer clients.
 
 **Support for Automatic Reconnect**
@@ -48,7 +50,7 @@
 * multi-user client mode
 * event-dispatching (CQ and registered interest)
 
-## <id="authentication_expiry_considerations"></a>Authentication Expiry Considerations
+## <a id="authentication_expiry_considerations"></a>Authentication Expiry Considerations
 
 The common cycle for authentication and authorization is the following:
 
@@ -56,15 +58,9 @@
 AuthInitialize.getCredentials(...) -> SecurityManager.authenticate(...) -> SecurityManager.authorize(...)
 ```
 
-Where `AuthInitialize.getCredentials()` provides the `security properties` for `SecurityManager.authenticate()`
-which in turn provides the `principal object` for `SecurityManager.authorize()`. It's important to
-understand that some time will pass between the `AuthInitialize.getCredentials()` call and the
-`SecurityManager.authorize()` call. The specific amount of time depends on the implementation and
-runtime environment details.
-
 When an external token provider is used, we assume that this provider will be asked for
-a token in the `AuthInitialize.getCredentials()` call. A token provider can return existing tokens for
-a given user so it is recommended that implementers of the `AuthInitialize` and `SecurityManager`
+a token in the `AuthInitialize.getCredentials()` call. A token provider can return existing tokens (which may be about to expire) for
+a given user, so it is recommended that implementers of the `AuthInitialize` and `SecurityManager`
 interfaces take imminent timeout and token refresh into consideration to avoid receiving multiple
 unintended `AuthenticationExpiredException`s in a row and having to deal with the propagation of these
 exceptions.
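
For example, a minimal `AuthInitialize` sketch along these lines (the token provider and the `security-token` property name are hypothetical):

```
import java.util.Properties;

import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.security.AuthInitialize;
import org.apache.geode.security.AuthenticationFailedException;

public class TokenAuthInitialize implements AuthInitialize {
  // Stand-in for an external token service; not a Geode API.
  private final TokenProvider provider = new TokenProvider();

  @Override
  public Properties getCredentials(Properties securityProps, DistributedMember server,
      boolean isPeer) throws AuthenticationFailedException {
    Properties credentials = new Properties();
    // Ask for a token with enough remaining lifetime to cover the gap between this
    // call and the authorize() calls that follow on the server.
    credentials.setProperty("security-token", provider.tokenValidForAtLeast(60));
    return credentials;
  }

  static class TokenProvider {
    String tokenValidForAtLeast(int seconds) {
      return "opaque-token"; // fetch or refresh if the cached token is near expiry
    }
  }
}
```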
diff --git a/geode-docs/managing/security/implementing_ssl.html.md.erb b/geode-docs/managing/security/implementing_ssl.html.md.erb
index d431423..8f797e9 100644
--- a/geode-docs/managing/security/implementing_ssl.html.md.erb
+++ b/geode-docs/managing/security/implementing_ssl.html.md.erb
@@ -32,32 +32,32 @@
 system components.  The following list shows the system components that can be separately configured
 to communicate using SSL, and the kind of communications to which each component name refers:
 
-<dt>**cluster**</dt>
-<dd>Peer-to-peer communications among members of a cluster</dd>
+**cluster**
+:    Peer-to-peer communications among members of a cluster
 
-<dt>**gateway**</dt>
-<dd>Communication across WAN gateways from one site to another</dd>
+**gateway**
+:    Communication across WAN gateways from one site to another
 
-<dt>**web**</dt>
-<dd>All web-based services hosted on the configured server, which can include the Developer REST API
+**web**
+:    All web-based services hosted on the configured server, which can include the Developer REST API
 service, the Management REST API service (used for remote cluster management) and the Pulse
-monitoring tool's web-based user interface.</dd>
+monitoring tool's web-based user interface.
 
-<dt>**jmx**</dt>
-<dd>Java management extension communications, including communications with the `gfsh` utility. 
+**jmx**
+:    Java management extension communications, including communications with the `gfsh` utility. 
 The Pulse monitoring tool uses JMX for server-side communication with a locator, but SSL
 applies to this connection only if Pulse is located on an app server separate from the
 locator. When Pulse and the locator are colocated, JMX communication between the two does not
-involve a TCP connection, so SSL does not apply.</dd>
+involve a TCP connection, so SSL does not apply.
 
-<dt>**locator**</dt>
-<dd>Communication with and between locators</dd>
+**locator**
+:    Communication with and between locators
 
-<dt>**server**</dt>
-<dd>Communication between clients and servers</dd>
+**server**
+:    Communication between clients and servers
 
-<dt>**all**</dt>
-<dd>All of the above (use SSL system-wide)</dd>
+**all**
+:    All of the above (use SSL system-wide)
 
 Specifying that a component is enabled for SSL applies to the component's server-socket side and its
 client-socket side.  For example, if you enable SSL for locators, then any process that communicates
@@ -68,55 +68,55 @@
 You can use <%=vars.product_name%> configuration properties to enable or disable SSL, to identify SSL ciphers and
 protocols, and to provide the location and credentials for key and trust stores.
 
-<dt>**ssl-enabled-components**</dt>
-<dd>List of components for which to enable SSL. Component list can be "" (disable SSL), "all", or a comma-separated list of components.</dd>
+**ssl-enabled-components**
+:    List of components for which to enable SSL. Component list can be "" (disable SSL), "all", or a comma-separated list of components.
 
-<dt>**ssl-endpoint-identification-enabled**</dt>
-<dd>A boolean value that, when set to true, causes clients to validate the server's hostname using the server's certificate.
+**ssl-endpoint-identification-enabled**
+:    A boolean value that, when set to true, causes clients to validate the server's hostname using the server's certificate.
 The default value is false.
-Enabling endpoint identification guards against DNS man-in-the-middle attacks when trusting certificates that are not self-signed.</dd>
+Enabling endpoint identification guards against DNS man-in-the-middle attacks when trusting certificates that are not self-signed.
 
-<dt>**ssl-use-default-context**</dt>
-<dd>A boolean value that, when set to true, allows <%=vars.product_name%> to use the default SSL context as returned by
+**ssl-use-default-context**
+:    A boolean value that, when set to true, allows <%=vars.product_name%> to use the default SSL context as returned by
 SSLContext.getInstance('Default') or set by using SSLContext.setDefault().
 When enabled, also causes ssl-endpoint-identification-enabled to be set to true.
-</dd>
 
-<dt>**ssl-require-authentication**</dt>
-<dd>Requires two-way authentication, applies to all components except web. Boolean - if true (the default), two-way authentication is required.</dd>
 
-<dt>**ssl-web-require-authentication**</dt>
-<dd>Requires two-way authentication for web component. Boolean - if true, two-way authentication is required. Default is false (one-way authentication only).</dd>
+**ssl-require-authentication**
+:    Requires two-way authentication, applies to all components except web. Boolean - if true (the default), two-way authentication is required.
 
-<dt>**ssl-default-alias**</dt>
-<dd>A server uses one key store to hold its SSL certificates. All components on that server can share a
+**ssl-web-require-authentication**
+:    Requires two-way authentication for web component. Boolean - if true, two-way authentication is required. Default is false (one-way authentication only).
+
+**ssl-default-alias**
+:    A server uses one key store to hold its SSL certificates. All components on that server can share a
 single certificate, designated by the ssl-default-alias property.  If ssl-default-alias
-is not specified, the first certificate in the key store acts as the default certificate.</dd>
+is not specified, the first certificate in the key store acts as the default certificate.
 
-<dt>**ssl-_component_-alias=string**</dt>
-<dd>You can configure a separate certificate for any component. All certificates reside in the same key
+**ssl-_component_-alias=string**
+:    You can configure a separate certificate for any component. All certificates reside in the same key
 store, but can be designated by separate aliases that incorporate the component name, using this syntax,
 where _component_ is the name of a component. When a component-specific alias is specified, it
 overrides the ssl-default-alias for the _component_ specified.
 
-For example, ssl-locator-alias would specify a name for the locator component's certificate in the system key store.</dd>
+For example, ssl-locator-alias would specify a name for the locator component's certificate in the system key store.
 
-<dt>**ssl-ciphers**</dt>
-<dd>A comma-separated list of the valid ciphers for TCP/IP connections with TLS encryption enabled. A setting of 'any'
-allows the JSSE provider to select an appropriate cipher that it supports.</dd>
+**ssl-ciphers**
+:    A comma-separated list of the valid ciphers for TCP/IP connections with TLS encryption enabled. A setting of 'any'
+allows the JSSE provider to select an appropriate cipher that it supports.
 
-<dt>**ssl-protocols**</dt>
-<dd>A comma-separated list of the valid protocol versions for TCP/IP connections with TLS encryption enabled.
-A setting of 'any' attempts to use your JSSE provider's TLSv1.3, or TLSv1.2 if v1.3 is not available.</dd>
+**ssl-protocols**
+:    A comma-separated list of the valid protocol versions for TCP/IP connections with TLS encryption enabled.
+A setting of 'any' attempts to use your JSSE provider's TLSv1.3, or TLSv1.2 if v1.3 is not available.
 
-<dt>**ssl-keystore, ssl-keystore-password**</dt>
-<dd>The path to the key store and the key store password, specified as strings</dd>
+**ssl-keystore, ssl-keystore-password**
+:    The path to the key store and the key store password, specified as strings
 
-<dt>**ssl-truststore, ssl-truststore-password**</dt>
-<dd>The path to the trust store and the trust store password, specified as strings</dd>
+**ssl-truststore, ssl-truststore-password**
+:    The path to the trust store and the trust store password, specified as strings
 
-<dt>**ssl-keystore-type, ssl-truststore-type**</dt>
-<dd>The types of the key store and trust store, specified as strings. The default for both is "JKS", indicating a Java key store or trust store.</dd>
+**ssl-keystore-type, ssl-truststore-type**
+:    The types of the key store and trust store, specified as strings. The default for both is "JKS", indicating a Java key store or trust store.
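+
+As a quick sketch of how these properties fit together when creating a cache programmatically (paths and passwords are placeholders):
+
+```
+import java.util.Properties;
+
+import org.apache.geode.cache.Cache;
+import org.apache.geode.cache.CacheFactory;
+
+public class SslConfigSketch {
+  public static void main(String[] args) {
+    Properties props = new Properties();
+    props.setProperty("ssl-enabled-components", "cluster,locator,server");
+    props.setProperty("ssl-endpoint-identification-enabled", "true");
+    props.setProperty("ssl-keystore", "/path/to/keystore.jks");
+    props.setProperty("ssl-keystore-password", "changeit");
+    props.setProperty("ssl-truststore", "/path/to/truststore.jks");
+    props.setProperty("ssl-truststore-password", "changeit");
+    Cache cache = new CacheFactory(props).create(); // peers need matching SSL settings
+  }
+}
+```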
 
 ### Example: secure communications throughout
 
diff --git a/geode-docs/reference/topics/cache_xml.html.md.erb b/geode-docs/reference/topics/cache_xml.html.md.erb
index 9ab1ba7..d1eeaa3 100644
--- a/geode-docs/reference/topics/cache_xml.html.md.erb
+++ b/geode-docs/reference/topics/cache_xml.html.md.erb
@@ -267,7 +267,7 @@
 </tr>
 <tr>
 <td>batch-time-interval</td>
-<td>Maximum number of milliseconds that can elapse between sending batches.</td>
+<td>Maximum amount of time, in milliseconds, that can elapse before a batch is delivered when there are not enough events in the queue to reach the batch-size.</td>
 <td>1000</td>
 </tr>
 <tr>
@@ -539,7 +539,7 @@
 </tr>
 <tr>
 <td>batch-time-interval</td>
-<td>Maximum number of milliseconds that can elapse between sending batches.</td>
+<td>Maximum amount of time, in milliseconds, that can elapse before a batch is delivered when there are not enough events in the queue to reach the batch-size.</td>
 <td>5</td>
 </tr>
 <tr>
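
In other words, a batch is delivered as soon as either batch-size events are queued or batch-time-interval elapses, whichever comes first. A brief sketch via the Java API (the sender id and remote distributed-system id are illustrative):

```
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.wan.GatewaySender;
import org.apache.geode.cache.wan.GatewaySenderFactory;

public class BatchIntervalSketch {
  static GatewaySender create(Cache cache) {
    GatewaySenderFactory factory = cache.createGatewaySenderFactory();
    factory.setBatchSize(100);          // ship when 100 events are queued...
    factory.setBatchTimeInterval(1000); // ...or after 1000 ms, whichever comes first
    return factory.create("sender1", 2); // id "sender1", remote distributed-system-id 2
  }
}
```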
diff --git a/geode-docs/tools_modules/gfsh/command-pages/alter.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/alter.html.md.erb
index 7c4ac2f..14f4758 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/alter.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/alter.html.md.erb
@@ -238,7 +238,7 @@
 </tr>
 <tr>
 <td><span class="keyword parmname">&#8209;&#8209;batch-time-interval</span></td>
-<td>Maximum time, in milliseconds, that can elapse between sending batches.</td>
+<td>Maximum amount of time, in milliseconds, that can elapse before a batch is delivered when there are not enough events in the queue to reach the batch-size.</td>
 </tr>
 <tr>
 <td><span class="keyword parmname">&#8209;&#8209;gateway-event-filter</span></td>
diff --git a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
index c8503ad..b946ab6 100644
--- a/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
+++ b/geode-docs/tools_modules/gfsh/command-pages/create.html.md.erb
@@ -132,7 +132,7 @@
 </tr>
 <tr>
 <td><span class="keyword parmname">&#8209;&#8209;batch-time-interval</span></td>
-<td>Maximum amount of time, in ms, that can elapse before a batch is delivered.</td>
+<td>Maximum amount of time, in milliseconds, that can elapse before a batch is delivered when there are not enough events in the queue to reach the batch-size.</td>
 <td>5</td>
 </tr>
 <tr>
@@ -585,7 +585,7 @@
 </tr>
 <tr>
 <td><span class="keyword parmname">&#8209;&#8209;batch-time-interval</span></td>
-<td>Maximum number of milliseconds that can elapse between sending batches.</td>
+<td>Maximum amount of time, in milliseconds, that can elapse before a batch is delivered when there are not enough events in the queue to reach the batch-size.</td>
 <td>1000</td>
 </tr>
 <tr>
diff --git a/geode-dunit/src/main/java/org/apache/geode/internal/cache/functions/TestFunction.java b/geode-dunit/src/main/java/org/apache/geode/internal/cache/functions/TestFunction.java
index 951a9b7..a46840f 100755
--- a/geode-dunit/src/main/java/org/apache/geode/internal/cache/functions/TestFunction.java
+++ b/geode-dunit/src/main/java/org/apache/geode/internal/cache/functions/TestFunction.java
@@ -99,6 +99,7 @@
   public static final String TEST_FUNCTION_SINGLE_HOP_FORCE_NETWORK_HOP =
       "executeFunctionSingleHopForceNetworkHop";
   public static final String TEST_FUNCTION_GET_NETWORK_HOP = "executeFunctionGetNetworkHop";
+  public static final String TEST_FUNCTION_SLOW = "SlowFunction";
   private static final String ID = "id";
   private static final String HAVE_RESULTS = "haveResults";
   private final Properties props;
@@ -197,6 +198,8 @@
       executeSingleHopForceNetworkHop(context);
     } else if (id.equals(TEST_FUNCTION_GET_NETWORK_HOP)) {
       executeGetNetworkHop(context);
+    } else if (id.equals(TEST_FUNCTION_SLOW)) {
+      executeSlowFunction(context);
     } else if (noAckTest.equals("true")) {
       execute1(context);
     }
@@ -1041,6 +1044,22 @@
     context.getResultSender().lastResult(networkHopType);
   }
 
+  private void executeSlowFunction(FunctionContext context) {
+    int entries = 4;
+    int waitBetweenEntriesMs = 5000;
+    for (int i = 0; i < entries; i++) {
+      try {
+        Thread.sleep(waitBetweenEntriesMs);
+      } catch (InterruptedException e) {
+        context.getResultSender().sendException(e);
+        Thread.currentThread().interrupt();
+        return;
+      }
+      context.getResultSender().sendResult(i);
+    }
+    context.getResultSender().lastResult(entries);
+  }
+
   /**
    * Get the function identifier, used by clients to invoke this function
    *
@@ -1096,12 +1115,12 @@
 
   @Override
   public boolean isHA() {
-
     if (getId().equals(TEST_FUNCTION10)) {
       return true;
     }
     if (getId().equals(TEST_FUNCTION_NONHA_SERVER) || getId().equals(TEST_FUNCTION_NONHA_REGION)
-        || getId().equals(TEST_FUNCTION_NONHA_NOP) || getId().equals(TEST_FUNCTION_NONHA)) {
+        || getId().equals(TEST_FUNCTION_NONHA_NOP) || getId().equals(TEST_FUNCTION_NONHA)
+        || getId().equals(TEST_FUNCTION_SLOW)) {
       return false;
     }
     return Boolean.parseBoolean(props.getProperty(HAVE_RESULTS));
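  // Illustrative usage (not part of this change set): because the function streams
  // results and reports non-HA, a caller can consume them incrementally, e.g.
  //   ResultCollector<?, ?> rc =
  //       FunctionService.onRegion(region).execute(TestFunction.TEST_FUNCTION_SLOW);
  //   List<?> results = (List<?>) rc.getResult(30, TimeUnit.SECONDS);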
diff --git a/geode-dunit/src/main/java/org/apache/geode/test/junit/rules/ServerStarterRule.java b/geode-dunit/src/main/java/org/apache/geode/test/junit/rules/ServerStarterRule.java
index bce7ed9..f23ee9a 100644
--- a/geode-dunit/src/main/java/org/apache/geode/test/junit/rules/ServerStarterRule.java
+++ b/geode-dunit/src/main/java/org/apache/geode/test/junit/rules/ServerStarterRule.java
@@ -66,6 +66,7 @@
   private PdxSerializer pdxSerializer = null;
   private boolean pdxReadSerialized = false;
   private boolean pdxReadSerializedUserSet = false;
+  private int maxThreads = -1;
   // By default we start one server per jvm
   private int serverCount = 1;
 
@@ -123,6 +124,11 @@
     servers.clear();
   }
 
+  public ServerStarterRule withMaxThreads(int maxThreads) {
+    this.maxThreads = maxThreads;
+    return this;
+  }
+
   public ServerStarterRule withPDXPersistent() {
     pdxPersistent = true;
     pdxPersistentUserSet = true;
@@ -219,6 +225,9 @@
       } else {
         server.setPort(0);
       }
+      if (maxThreads >= 0) {
+        server.setMaxThreads(maxThreads);
+      }
       try {
         server.start();
       } catch (IOException e) {
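      // Illustrative usage (not part of this change set): cap the cache server's
      // thread pool in a test, e.g.
      //   @Rule
      //   public ServerStarterRule server = new ServerStarterRule().withMaxThreads(2);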
diff --git a/geode-dunit/src/main/resources/org/apache/geode/test/dunit/internal/sanctioned-geode-dunit-serializables.txt b/geode-dunit/src/main/resources/org/apache/geode/test/dunit/internal/sanctioned-geode-dunit-serializables.txt
index 81f334b..70a8520 100644
--- a/geode-dunit/src/main/resources/org/apache/geode/test/dunit/internal/sanctioned-geode-dunit-serializables.txt
+++ b/geode-dunit/src/main/resources/org/apache/geode/test/dunit/internal/sanctioned-geode-dunit-serializables.txt
@@ -180,4 +180,4 @@
 org/apache/geode/test/junit/rules/LocatorStarterRule,false
 org/apache/geode/test/junit/rules/MemberStarterRule,false,autoStart:boolean,availableHttpPort:int,availableJmxPort:int,cleanWorkingDir:boolean,firstLevelChildrenFile:java/util/List,httpPort:int,jmxPort:int,logFile:boolean,memberPort:int,name:java/lang/String,properties:java/util/Properties,restore:org/apache/geode/test/junit/rules/accessible/AccessibleRestoreSystemProperties,systemProperties:java/util/Properties
 org/apache/geode/test/junit/rules/ServerLauncherStartupRule,false,autoStart:boolean,builderOperator:java/util/function/UnaryOperator,launcher:org/apache/geode/distributed/ServerLauncher,properties:java/util/Properties,temp:org/junit/rules/TemporaryFolder
-org/apache/geode/test/junit/rules/ServerStarterRule,false,availableLocatorPort:int,embeddedLocatorPort:int,pdxPersistent:boolean,pdxPersistentUserSet:boolean,pdxReadSerialized:boolean,pdxReadSerializedUserSet:boolean,pdxSerializer:org/apache/geode/pdx/PdxSerializer,regions:java/util/Map,serverCount:int
+org/apache/geode/test/junit/rules/ServerStarterRule,false,availableLocatorPort:int,embeddedLocatorPort:int,maxThreads:int,pdxPersistent:boolean,pdxPersistentUserSet:boolean,pdxReadSerialized:boolean,pdxReadSerializedUserSet:boolean,pdxSerializer:org/apache/geode/pdx/PdxSerializer,regions:java/util/Map,serverCount:int
diff --git a/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/DataBrowserResultLoader.java b/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/DataBrowserResultLoader.java
index f227800..392f0c9 100644
--- a/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/DataBrowserResultLoader.java
+++ b/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/DataBrowserResultLoader.java
@@ -17,13 +17,11 @@
 package org.apache.geode.tools.pulse.tests;
 
 import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.net.URL;
 import java.nio.charset.StandardCharsets;
+import java.util.stream.Collectors;
 
 public class DataBrowserResultLoader {
   /* Constants for executing Data Browser queries */
@@ -33,7 +31,8 @@
   public static final String QUERY_TYPE_FOUR = "query4";
   public static final String QUERY_TYPE_FIVE = "query5";
   public static final String QUERY_TYPE_SIX = "query6";
-  public static final String QUERY_TYPE_SEVENE = "query7";
+  public static final String QUERY_TYPE_SEVEN = "query7";
+  public static final String QUERY_TYPE_EIGHT = "query8";
 
   private static final DataBrowserResultLoader dbResultLoader = new DataBrowserResultLoader();
 
@@ -43,41 +42,46 @@
 
   public String load(String queryString) throws IOException {
 
-    URL url = null;
-    InputStream inputStream = null;
-    BufferedReader streamReader = null;
-    String inputStr = null;
-    StringBuilder sampleQueryResultResponseStrBuilder = null;
+    String fileName;
+    String fileContent = "";
 
     try {
-      ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
 
-      if (queryString.equals(QUERY_TYPE_ONE)) {
-        url = classLoader.getResource("testQueryResultClusterSmall.txt");
-      } else if (queryString.equals(QUERY_TYPE_TWO)) {
-        url = classLoader.getResource("testQueryResultSmall.txt");
-      } else if (queryString.equals(QUERY_TYPE_THREE)) {
-        url = classLoader.getResource("testQueryResult.txt");
-      } else if (queryString.equals(QUERY_TYPE_FOUR)) {
-        url = classLoader.getResource("testQueryResultWithStructSmall.txt");
-      } else if (queryString.equals(QUERY_TYPE_FIVE)) {
-        url = classLoader.getResource("testQueryResultClusterWithStruct.txt");
-      } else if (queryString.equals(QUERY_TYPE_SIX)) {
-        url = classLoader.getResource("testQueryResultHashMapSmall.txt");
-      } else if (queryString.equals(QUERY_TYPE_SEVENE)) {
-        url = classLoader.getResource("testQueryResult1000.txt");
-      } else {
-        url = classLoader.getResource("testQueryResult.txt");
+      switch (queryString) {
+        case QUERY_TYPE_ONE:
+          fileName = "testQueryResultClusterSmall.txt";
+          break;
+        case QUERY_TYPE_TWO:
+          fileName = "testQueryResultSmall.txt";
+          break;
+        case QUERY_TYPE_THREE:
+          fileName = "testQueryResult.txt";
+          break;
+        case QUERY_TYPE_FOUR:
+          fileName = "testQueryResultWithStructSmall.txt";
+          break;
+        case QUERY_TYPE_FIVE:
+          fileName = "testQueryResultClusterWithStruct.txt";
+          break;
+        case QUERY_TYPE_SIX:
+          fileName = "testQueryResultHashMapSmall.txt";
+          break;
+        case QUERY_TYPE_SEVEN:
+          fileName = "testQueryResult1000.txt";
+          break;
+        case QUERY_TYPE_EIGHT:
+          fileName = "testQueryResultClusterSmallJSInject.txt";
+          break;
+        default:
+          fileName = "testQueryResult.txt";
+          break;
       }
 
-      File sampleQueryResultFile = new File(url.getPath());
-      inputStream = new FileInputStream(sampleQueryResultFile);
-      streamReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
-      sampleQueryResultResponseStrBuilder = new StringBuilder();
-
-      while ((inputStr = streamReader.readLine()) != null) {
-        sampleQueryResultResponseStrBuilder.append(inputStr);
-      }
+      InputStream inputStream = getClass().getResourceAsStream("/" + fileName);
+      assert inputStream != null;
+      BufferedReader streamReader =
+          new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
+      fileContent = streamReader.lines().collect(Collectors.joining(System.lineSeparator()));
 
       // close stream reader
       streamReader.close();
@@ -86,6 +90,6 @@
       ex.printStackTrace();
     }
 
-    return sampleQueryResultResponseStrBuilder.toString();
+    return fileContent;
   }
 }
diff --git a/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/PulseTestData.java b/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/PulseTestData.java
index 219c63f..edbc0e1 100644
--- a/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/PulseTestData.java
+++ b/geode-pulse/geode-pulse-test/src/main/java/org/apache/geode/tools/pulse/tests/PulseTestData.java
@@ -94,6 +94,9 @@
     public static final String partialRgnName = "R";
     public static final String chkRgnClassName = "bttn chk checkbox_true_full";
     public static final String notChkRgnClassName = "bttn chk checkbox_false_full";
+    public static final String resultClusterHeadingsXPath = "//div[@id='clusterDetails']/div/div";
+    public static final String resultClusterCellXPath =
+        "//tr/td[contains(@title, '<script>alert')]";
 
     public static final String regName = "R1";
     public static final String query1Text = "select * from " + SEPARATOR + "R1";
@@ -101,6 +104,4 @@
     public static final String datePattern = "EEE, MMM dd yyyy, HH:mm:ss z";
 
   }
-
-
 }
diff --git a/geode-pulse/geode-pulse-test/src/main/resources/testQueryResultClusterSmallJSInject.txt b/geode-pulse/geode-pulse-test/src/main/resources/testQueryResultClusterSmallJSInject.txt
new file mode 100644
index 0000000..25a0a2c
--- /dev/null
+++ b/geode-pulse/geode-pulse-test/src/main/resources/testQueryResultClusterSmallJSInject.txt
@@ -0,0 +1,23 @@
+{"result":[
+    ["org.apache.geode.cache.query.data.PortfolioDummy",
+     {"type":["java.lang.String","type0"],"ID":["int",0],"active":["boolean",true],"pk":["java.lang.String","0"],"collectionHolderMapDummy":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"YHOO":["org.apache.geode.cache.query.data.Position",{"id":["int",2],"secId":["java.lang.String","YHOO"],"mktValue":["double",3],"sharesOutstanding":["double",2000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"IBM":["org.apache.geode.cache.query.data.Position",{"id":["int",1],"secId":["java.lang.String","IBM"],"mktValue":["double",2],"sharesOutstanding":["double",1000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",0],"secId":["java.lang.String","SUN"],"mktValue":["double",1],"sharesOutstanding":["double",0],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":null,"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.Portfolio",
+     {"type":["java.lang.String","type0"],"ID":["int",0],"active":["boolean",true],"pk":["java.lang.String","0"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"YHOO":["org.apache.geode.cache.query.data.Position",{"id":["int",2],"secId":["java.lang.String","YHOO"],"mktValue":["double",3],"sharesOutstanding":["double",2000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"IBM":["org.apache.geode.cache.query.data.Position",{"id":["int",1],"secId":["java.lang.String","IBM"],"mktValue":["double",2],"sharesOutstanding":["double",1000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",0],"secId":["java.lang.String","SUN"],"mktValue":["double",1],"sharesOutstanding":["double",0],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":null,"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.Portfolio",
+     {"type":["java.lang.String","type1"],"ID":["int",1],"active":["boolean",false],"pk":["java.lang.String","1"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"AOL":["org.apache.geode.cache.query.data.Position",{"id":["int",5],"secId":["java.lang.String","AOL"],"mktValue":["double",6],"sharesOutstanding":["double",5000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"APPL":["org.apache.geode.cache.query.data.Position",{"id":["int",6],"secId":["java.lang.String","APPL"],"mktValue":["double",7],"sharesOutstanding":["double",6000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",3],"secId":["java.lang.String","GOOG"],"mktValue":["double",4],"sharesOutstanding":["double",3000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":["org.apache.geode.cache.query.data.Position",{"id":["int",4],"secId":["java.lang.String","MSFT"],"mktValue":["double",5],"sharesOutstanding":["double",4000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.Portfolio",
+     {"type":["java.lang.String","type2"],"ID":["int",2],"active":["boolean",true],"pk":["java.lang.String","2"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"SAP":["org.apache.geode.cache.query.data.Position",{"id":["int",8],"secId":["java.lang.String","SAP"],"mktValue":["double",9],"sharesOutstanding":["double",8000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"DELL":["org.apache.geode.cache.query.data.Position",{"id":["int",9],"secId":["java.lang.String","DELL"],"mktValue":["double",10],"sharesOutstanding":["double",9000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",7],"secId":["java.lang.String","ORCL"],"mktValue":["double",8],"sharesOutstanding":["double",7000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":null,"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.Portfolio",
+     {"type":["java.lang.String","type0"],"ID":["int",3],"active":["boolean",false],"pk":["java.lang.String","3"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"HP":["org.apache.geode.cache.query.data.Position",{"id":["int",12],"secId":["java.lang.String","HP"],"mktValue":["double",13],"sharesOutstanding":["double",12000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"SUN":["org.apache.geode.cache.query.data.Position",{"id":["int",13],"secId":["java.lang.String","SUN"],"mktValue":["double",14],"sharesOutstanding":["double",13000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",10],"secId":["java.lang.String","RHAT"],"mktValue":["double",11],"sharesOutstanding":["double",10000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":["org.apache.geode.cache.query.data.Position",{"id":["int",11],"secId":["java.lang.String","NOVL"],"mktValue":["double",12],"sharesOutstanding":["double",11000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.PortfolioDummy",
+     {"type":["java.lang.String","type1"],"ID":["int",4],"active":["boolean",true],"pk":["java.lang.String","4"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["java.util.HashMap",{"YHOO":["org.apache.geode.cache.query.data.Position",{"id":["int",15],"secId":["java.lang.String","YHOO"],"mktValue":["double",16],"sharesOutstanding":["double",15000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"GOOG":["org.apache.geode.cache.query.data.Position",{"id":["int",16],"secId":["java.lang.String","GOOG"],"mktValue":["double",17],"sharesOutstanding":["double",16000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",14],"secId":["java.lang.String","IBM"],"mktValue":["double",15],"sharesOutstanding":["double",14000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":null,"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}],
+
+    ["org.apache.geode.cache.query.data.Portfolio",
+     {"type":["java.lang.String","<script>alert('xss')</script>"],"ID":["int",5],"active":["boolean",false],"pk":["java.lang.String","5"],"collectionHolderMap":["java.util.HashMap",{"3":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"2":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"1":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}],"0":["org.apache.geode.cache.query.data.CollectionHolder",{"arr":["java.lang.String[]",["0","1","2","3","4","SUN","IBM","YHOO","GOOG","MSFT"]]}]}],"createTime":["long",0],"positions":["<script>alert('xss')</script>",{"APPL":["org.apache.geode.cache.query.data.Position",{"id":["int",19],"secId":["java.lang.String","<script>alert('APPL')</script>"],"mktValue":["double",20],"sharesOutstanding":["double",19000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"ORCL":["org.apache.geode.cache.query.data.Position",{"id":["int",20],"secId":["java.lang.String","ORCL"],"mktValue":["double",21],"sharesOutstanding":["double",20000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}]}],"p1":["org.apache.geode.cache.query.data.Position",{"id":["int",17],"secId":["java.lang.String","MSFT"],"mktValue":["double",18],"sharesOutstanding":["double",17000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"p2":["org.apache.geode.cache.query.data.Position",{"id":["int",18],"secId":["java.lang.String","AOL"],"mktValue":["double",19],"sharesOutstanding":["double",18000],"col":["java.util.HashSet",[["java.lang.String","1"],["java.lang.String","0"]]]}],"floatMinValue":["float",1.4E-45],"longMinValue":["float",-9.223372E18],"doubleMinValue":["double",4.9E-324]}]
+    ]
+}
\ No newline at end of file
diff --git a/geode-pulse/src/main/webapp/META-INF/context.xml b/geode-pulse/src/main/webapp/META-INF/context.xml
new file mode 100644
index 0000000..e70eb6c
--- /dev/null
+++ b/geode-pulse/src/main/webapp/META-INF/context.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<Context>
+
+    <!-- Add SameSite to the cookies for Tomcat -->
+    <CookieProcessor
+            sameSiteCookies="Strict" />
+
+</Context>
\ No newline at end of file
diff --git a/geode-pulse/src/main/webapp/WEB-INF/web.xml b/geode-pulse/src/main/webapp/WEB-INF/web.xml
index 5da1928..e16a8dd 100644
--- a/geode-pulse/src/main/webapp/WEB-INF/web.xml
+++ b/geode-pulse/src/main/webapp/WEB-INF/web.xml
@@ -43,6 +43,14 @@
     <param-name>spring.profiles.default</param-name>
     <param-value>pulse.authentication.default</param-value>
   </context-param>
+
+  <session-config>
+    <cookie-config>
+      <http-only>true</http-only>
+      <comment>__SAME_SITE_STRICT__</comment>
+    </cookie-config>
+  </session-config>
+
   <filter>
     <filter-name>springSecurityFilterChain</filter-name>
     <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class>
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQuery.js b/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQuery.js
index ce37dfb..e5056ba 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQuery.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQuery.js
@@ -68,10 +68,10 @@
   }
   
  // Determine the selected members on which the query is to be executed
-  if($("#membersList").html() != ""){
+  if($("#membersList").html() !== ""){
     var selectedMembers = $( "input[type=checkbox][name=Member]:checked" );
     for(var i=0; i< selectedMembers.length; i++){
-      if(selectedMemberNames == ""){
+      if(selectedMemberNames === ""){
         selectedMemberNames = selectedMembers[i].value;
       }else{
         selectedMemberNames += ","+selectedMembers[i].value;
@@ -803,14 +803,14 @@
 //Function for converting raw response into expected format
 function convertRawResponseToExpectedFormat(rawResponeData){
   
-  if(rawResponeData == null || rawResponeData == undefined){
+  if(rawResponeData === null || rawResponeData === undefined){
     return;
   }
   
   var finalResponseData = {};
   var finalResponseResults = [];
   
-  if(rawResponeData.result != null || rawResponeData.result != undefined){
+  if(rawResponeData.result !== null && rawResponeData.result !== undefined){
     var rawResponeDataResult = rawResponeData.result;
     
     for(var i=0; i<rawResponeDataResult.length; i++){
@@ -821,7 +821,7 @@
             finalResponseResults = convertToExpectedObjectsFormat(rawResponeDataResult, "");
             break;
             
-          }else if(rawResponeDataResult[i].member != null && rawResponeDataResult[i].member != undefined){
+          }else if(rawResponeDataResult[i].member != null && rawResponeDataResult[i].member !== undefined){
             
             var responseForMember = {};
             responseForMember.member = rawResponeDataResult[i].member[0];
@@ -842,31 +842,25 @@
 
 // Function for converting raw response into expected object wise results format
 function convertToExpectedObjectsFormat(rawResponseResult, prefixForId){
-  
-  var expResponseResult = [];
-  
-  if(rawResponseResult != null && rawResponseResult != undefined ){
+
+  let entry;
+  let objectResults;
+  const expResponseResult = [];
+
+  if(rawResponseResult != null){
     
-    for(var i=0; i< rawResponseResult.length; i++){
+    for(let i=0; i < rawResponseResult.length; i++){
       if(rawResponseResult[i] != null){
         
         if(expResponseResult.length > 0){
           // search expected object type in expResponseResult
-          var flagObjectFound = false;
-          for(var j=0 ; j < expResponseResult.length ; j++){
-            if(expResponseResult[j].objectType == rawResponseResult[i][0]){
+          let flagObjectFound = false;
+          for(let j=0 ; j < expResponseResult.length ; j++){
+            if(expResponseResult[j].objectType === rawResponseResult[i][0]){
               // required object found
               flagObjectFound = true;
-              var objectResults = expResponseResult[j].objectResults;
-              var type = rawResponseResult[i][0];
-              var entry = rawResponseResult[i][1];
-
-              // if entry is not object then convert it into object
-              if(typeof(entry) != "object" ){
-                var entryObj = {};
-                entryObj[type] = rawResponseResult[i][1];
-                entry = entryObj;
-              }
+              objectResults = expResponseResult[j].objectResults;
+              entry = htmlEncodeEntry(rawResponseResult[i]);
 
               // add unique id for new entry
               entry.uid = generateEntryUID(prefixForId, expResponseResult[j].objectType, objectResults.length);
@@ -875,57 +869,12 @@
               break;
             }
           }
-          
-          if(!flagObjectFound){  // required object not found in expResponseResult 
-            
-            var objectResults = [];
-            var type = rawResponseResult[i][0];
-            var entry = rawResponseResult[i][1];
-
-            // if entry is not object then convert it into object
-            if(typeof(entry) != "object" ){
-              var entryObj = {};
-              entryObj[type] = rawResponseResult[i][1];
-              entry = entryObj;
-            }
-
-            // add unique id for new entry
-            entry.uid = generateEntryUID(prefixForId, type, objectResults.length);
-            
-            objectResults.push(entry);
-            
-            var newResultObject = {};
-            newResultObject.objectType = type;
-            newResultObject.objectResults = objectResults;
-            
-            expResponseResult.push(newResultObject);
+          if(!flagObjectFound){  // required object not found in expResponseResult
+            expResponseResult.push(addToExpResponseResult(rawResponseResult[i], prefixForId));
           }
-          
         }else{  // expResponseResult is empty
-          
-          var objectResults = [];
-          var type = rawResponseResult[i][0];
-          var entry = rawResponseResult[i][1];
-
-          // if entry is not object then convert it into object
-          if(typeof(entry) != "object" ){
-            var entryObj = {};
-            entryObj[type] = rawResponseResult[i][1];
-            entry = entryObj;
-          }
-
-          // add unique id for new entry
-          entry.uid = generateEntryUID(prefixForId, type, objectResults.length);
-          
-          objectResults.push(entry);
-          
-          var newResultObject = {};
-          newResultObject.objectType = type;
-          newResultObject.objectResults = objectResults;
-          
-          expResponseResult.push(newResultObject);
+          expResponseResult.push(addToExpResponseResult(rawResponseResult[i], prefixForId));
         }
-        
       }
     }
   }
@@ -933,6 +882,54 @@
   return expResponseResult;
 }
 
+// Add results to the expected responseResults
+function addToExpResponseResult(rawResponseResultEntry, prefixForId) {
+  let objectResults = [];
+  let type = rawResponseResultEntry[0];
+  let entry = htmlEncodeEntry(rawResponseResultEntry, prefixForId);
+
+  // add unique id for new entry
+  entry.uid = generateEntryUID(prefixForId, type, objectResults.length);
+
+  objectResults.push(entry);
+
+  let newResultObject = {};
+  newResultObject.objectType = type;
+  newResultObject.objectResults = objectResults;
+
+  return newResultObject;
+}
+
+// Ensure that strings are HTML encoded to reduce likelihood of XSS attacks
+function htmlEncodeEntry(rawResponseResultEntry, prefixForId) {
+  let type = htmlEncodeStringsAndObjects(rawResponseResultEntry[0]);
+  let entry = rawResponseResultEntry[1];
+
+  let entryObj = {};
+
+  // if entry is not object then convert it into object
+  if(typeof(entry) == "object" ){
+    entryObj = htmlEncodeStringsAndObjects(entry);
+  } else {
+    entryObj[type] = htmlEncodeStringsAndObjects(entry);
+  }
+
+  return entryObj;
+}
+
+function htmlEncodeStringsAndObjects(raw) {
+  switch(typeof(raw)) {
+    case "string":
+      return $('<pre/>').text(raw).html();
+    case "object":
+      let objectAsString = JSON.stringify(raw);
+      objectAsString = $('<pre/>').text(objectAsString).html();
+      return JSON.parse(objectAsString);
+    default:
+      return raw;
+  }
+}
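+
+// For example, htmlEncodeStringsAndObjects("<script>alert(1)</script>") returns
+// "&lt;script&gt;alert(1)&lt;/script&gt;"; objects are stringified, encoded the
+// same way, and parsed back, so nested string values end up HTML-encoded too.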
+
 // Function to generate unique identifier for entry
 function generateEntryUID(prefixForId, type, len) {
 
diff --git a/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQueryHistory.js b/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQueryHistory.js
index ef4b9e4..c413816 100644
--- a/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQueryHistory.js
+++ b/geode-pulse/src/main/webapp/scripts/pulsescript/pages/DataBrowserQueryHistory.js
@@ -21,25 +21,25 @@
 // updateQueryHistory()
 function updateQueryHistory(action,queryId) {
   
-  requestData = {
-    action:action,
-    queryId:queryId
+  let requestData = {
+    action: action,
+    queryId: queryId
   };
 
   $.getJSON("dataBrowserQueryHistory", requestData, function(data) {
-    
-    var queries = [];
-    if(data.queryHistory != undefined && data.queryHistory != null){
+
+    let queries = [];
+    if(data.queryHistory !== undefined && data.queryHistory !== null){
       queries = data.queryHistory;
     }
-    var refHistoryConatiner = $("#detailsHistoryList");
-    var queryListHTML = "";
-    if(queries.length == 0){
+    const refHistoryContainer = $("#detailsHistoryList");
+    let queryListHTML = "";
+    if(queries.length === 0){
       // no queries found
       queryListHTML = "No Query Found";
     }else{
       queries.sort(dynamicSort("queryId", "desc"));
-      for(var i=0; i<queries.length && i<20; i++){
+      for(let i=0; i < queries.length && i < 20; i++){
         // add query item
         queryListHTML += "" +
           "<div class=\"container\">" +
@@ -50,7 +50,7 @@
               "<div class=\"remove\">" +
                 "<a href=\"#\" onclick=\"updateQueryHistory('delete','"+ queries[i].queryId +"');\">&nbsp;</a>" +
               "</div>" +
-              "<div class=\"wrapHistoryContent\"  ondblclick=\"queryHistoryItemClicked(this);\">" + queries[i].queryText +
+              "<div class=\"wrapHistoryContent\"  ondblclick=\"queryHistoryItemClicked(this);\">" + queries[i].queryText.replaceAll("\"", "") +
               "</div>" +
               "<div class=\"dateTimeHistory\">" + queries[i].queryDateTime +
               "</div>" +
@@ -59,7 +59,7 @@
       }
     }
     
-    refHistoryConatiner.html(queryListHTML);
+    refHistoryContainer.html(queryListHTML);
     //$('.queryHistoryScroll-pane').jScrollPane();/*Custome scroll*/    
 
     // Set eventsAdded = false as list is refreshed and slide events 
@@ -73,7 +73,7 @@
 // This function displays error if occurred 
 function resErrHandler(data){
   // Check for unauthorized access
-  if (data.status == 401) {
+  if (data.status === 401) {
     // redirect user on Login Page
     window.location.href = "login.html?error=UNAUTH_ACCESS";
   }else{
diff --git a/geode-pulse/src/uiTest/java/org/apache/geode/tools/pulse/tests/ui/PulseAutomatedTest.java b/geode-pulse/src/uiTest/java/org/apache/geode/tools/pulse/tests/ui/PulseAutomatedTest.java
index 5af912c..3fc3f25 100644
--- a/geode-pulse/src/uiTest/java/org/apache/geode/tools/pulse/tests/ui/PulseAutomatedTest.java
+++ b/geode-pulse/src/uiTest/java/org/apache/geode/tools/pulse/tests/ui/PulseAutomatedTest.java
@@ -13,12 +13,6 @@
  * the License.
  *
  */
-/**
- * This test class contains automated tests for Pulse application related to 1. Different grid data
- * validations for example - Topology, Server Group, Redundancy Zone 2. Data Browser 3.
- *
- * @since GemFire 2014-04-02
- */
 package org.apache.geode.tools.pulse.tests.ui;
 
 import static org.apache.geode.tools.pulse.tests.ui.PulseTestUtils.assertMemberSortingByCpuUsage;
@@ -59,13 +53,16 @@
 import static org.apache.geode.tools.pulse.tests.ui.PulseTestUtils.verifyTextPresrntById;
 import static org.apache.geode.tools.pulse.tests.ui.PulseTestUtils.verifyTextPresrntByXpath;
 import static org.apache.geode.tools.pulse.tests.ui.PulseTestUtils.waitForElementWithId;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
+import java.util.Collection;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import org.junit.Assert;
 import org.junit.Before;
@@ -84,6 +81,12 @@
 import org.apache.geode.tools.pulse.tests.rules.ServerRule;
 import org.apache.geode.tools.pulse.tests.rules.WebDriverRule;
 
+/**
+ * This test class contains automated tests for the Pulse application, covering (1) grid data
+ * validations, for example Topology, Server Group, and Redundancy Zone, and (2) Data Browser.
+ *
+ * @since GemFire 2014-04-02
+ */
 public class PulseAutomatedTest extends PulseBase {
 
   @ClassRule
@@ -851,7 +854,7 @@
     clickElementUsingXpath(PulseTestLocators.DataBrowser.btnClearXpath);
     String editorTextAfterClear = getTextUsingId(PulseTestLocators.DataBrowser.queryEditorTxtBoxId);
 
-    assertFalse(PulseTestData.DataBrowser.query1Text.equals(editorTextAfterClear));
+    assertNotEquals(PulseTestData.DataBrowser.query1Text, editorTextAfterClear);
   }
 
   @Ignore("WIP") // Data Browser's Query History not showing any data on button click, therefore
@@ -892,10 +895,50 @@
     System.out.println("Query Text from History Table: " + queryText);
     System.out.println("Query Time from History Table: " + historyDateTime);
     // verify the query text, query datetime in history panel
-    assertTrue(DataBrowserResultLoader.QUERY_TYPE_ONE.equals(queryText));
-    assertTrue(historyDateTime.contains(queryTime[0]));
-
+    assertThat(queryText).isEqualTo(DataBrowserResultLoader.QUERY_TYPE_ONE);
+    assertThat(historyDateTime).contains(queryTime[0]);
   }
 
+  @Test
+  public void testDataBrowserHTMLEncode() {
+    // navigate to Data browser page
+    loadDataBrowserpage();
 
+    WebDriver driver = webDriverRule.getDriver();
+    List<WebElement> numOfReg = driver
+        .findElements(By.xpath(PulseTestLocators.DataBrowser.divDataRegions));
+
+    for (int i = 1; i <= numOfReg.size(); i++) {
+      if (getTextUsingId("treeDemo_" + i + "_span").equals(PulseTestData.DataBrowser.regName)) {
+        searchByIdAndClick("treeDemo_" + i + "_check");
+      }
+    }
+
+    sendKeysUsingId(PulseTestLocators.DataBrowser.queryEditorTxtBoxId,
+        DataBrowserResultLoader.QUERY_TYPE_EIGHT);
+    clickElementUsingId(PulseTestLocators.DataBrowser.btnExecuteQueryId);
+
+    clickElementUsingId(PulseTestLocators.DataBrowser.historyIcon);
+    String queryText = findElementByXpath(PulseTestLocators.DataBrowser.historyLst)
+        .findElement(By.cssSelector(PulseTestLocators.DataBrowser.queryText)).getText();
+
+    assertThat(queryText).isEqualTo(DataBrowserResultLoader.QUERY_TYPE_EIGHT);
+
+    List<WebElement> elements =
+        driver.findElements(By.xpath(PulseTestData.DataBrowser.resultClusterHeadingsXPath));
+    List<WebElement> filteredElements = elements.stream().filter(webElement -> webElement.getText()
+        .equals("org.apache.geode.cache.query.data.Portfolio")).collect(
+            Collectors.toList());
+    List<WebElement> finalElements = filteredElements.stream().map(webElement -> {
+      webElement.click();
+      return webElement.findElements(By.xpath(PulseTestData.DataBrowser.resultClusterCellXPath));
+    }).flatMap(Collection::stream).collect(Collectors.toList());
+
+    // confirm script text is displayed
+    assertThat(finalElements).hasSize(2);
+    finalElements.forEach(webElement -> {
+      assertThat(webElement.getAttribute("title")).isEqualTo("<script>alert('xss')</script>");
+    });
+  }
 }
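
For context on what the new testDataBrowserHTMLEncode test exercises: Pulse must render user-supplied query results as inert text, so a payload like <script>alert('xss')</script> shows up literally instead of executing. A minimal, self-contained sketch of the escaping idea follows; the escapeHtml helper here is illustrative and is not the encoder Pulse actually uses.

// Illustrative HTML-encoding helper, assuming the common entity mapping;
// not the actual Pulse implementation.
public final class HtmlEncodeSketch {
  static String escapeHtml(String s) {
    StringBuilder out = new StringBuilder(s.length());
    for (char c : s.toCharArray()) {
      switch (c) {
        case '&': out.append("&amp;"); break;
        case '<': out.append("&lt;"); break;
        case '>': out.append("&gt;"); break;
        case '"': out.append("&quot;"); break;
        case '\'': out.append("&#39;"); break;
        default: out.append(c);
      }
    }
    return out.toString();
  }

  public static void main(String[] args) {
    // A browser rendering the encoded form displays the literal text
    // rather than executing the script.
    System.out.println(escapeHtml("<script>alert('xss')</script>"));
  }
}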
diff --git a/geode-server-all/src/integrationTest/resources/dependency_classpath.txt b/geode-server-all/src/integrationTest/resources/dependency_classpath.txt
index 6af61df..c33e6ed 100644
--- a/geode-server-all/src/integrationTest/resources/dependency_classpath.txt
+++ b/geode-server-all/src/integrationTest/resources/dependency_classpath.txt
@@ -1,8 +1,8 @@
 spring-web-5.3.20.jar
-shiro-event-1.9.0.jar
-shiro-crypto-hash-1.9.0.jar
-shiro-crypto-cipher-1.9.0.jar
-shiro-config-core-1.9.0.jar
+shiro-event-1.9.1.jar
+shiro-crypto-hash-1.9.1.jar
+shiro-crypto-cipher-1.9.1.jar
+shiro-config-core-1.9.1.jar
 commons-digester-2.1.jar
 commons-validator-1.7.jar
 spring-jcl-5.3.20.jar
@@ -16,18 +16,18 @@
 javax.resource-api-1.7.1.jar
 LatencyUtils-2.0.3.jar
 jline-2.12.jar
-jetty-servlet-9.4.46.v20220331.jar
+jetty-servlet-9.4.47.v20220610.jar
 spring-core-5.3.20.jar
-jetty-util-ajax-9.4.46.v20220331.jar
+jetty-util-ajax-9.4.47.v20220610.jar
 geode-cq-0.0.0.jar
 geode-old-client-support-0.0.0.jar
 javax.servlet-api-3.1.0.jar
 jgroups-3.6.14.Final.jar
-shiro-cache-1.9.0.jar
+shiro-cache-1.9.1.jar
 httpcore-4.4.15.jar
 spring-beans-5.3.20.jar
 lucene-queries-6.6.6.jar
-shiro-core-1.9.0.jar
+shiro-core-1.9.1.jar
 HikariCP-4.0.3.jar
 slf4j-api-1.7.32.jar
 geode-http-service-0.0.0.jar
@@ -38,18 +38,18 @@
 lucene-core-6.6.6.jar
 fastutil-8.5.8.jar
 geode-gfsh-0.0.0.jar
-jetty-http-9.4.46.v20220331.jar
+jetty-http-9.4.47.v20220610.jar
 geode-memcached-0.0.0.jar
 rmiio-2.1.2.jar
 geode-tcp-server-0.0.0.jar
 log4j-jcl-2.17.2.jar
 geode-connectors-0.0.0.jar
 jackson-core-2.13.2.jar
-jetty-util-9.4.46.v20220331.jar
+jetty-util-9.4.47.v20220610.jar
 log4j-slf4j-impl-2.17.2.jar
 lucene-analyzers-common-6.6.6.jar
 geode-membership-0.0.0.jar
-jetty-webapp-9.4.46.v20220331.jar
+jetty-webapp-9.4.47.v20220610.jar
 commons-lang3-3.12.0.jar
 jopt-simple-5.0.4.jar
 swagger-annotations-2.2.0.jar
@@ -59,11 +59,11 @@
 geode-serialization-0.0.0.jar
 istack-commons-runtime-4.0.1.jar
 lucene-queryparser-6.6.6.jar
-jetty-io-9.4.46.v20220331.jar
+jetty-io-9.4.47.v20220610.jar
 geode-deployment-legacy-0.0.0.jar
 commons-beanutils-1.9.4.jar
 log4j-core-2.17.2.jar
-shiro-crypto-core-1.9.0.jar
+shiro-crypto-core-1.9.1.jar
 jaxb-api-2.3.1.jar
 geode-unsafe-0.0.0.jar
 spring-shell-1.2.0.RELEASE.jar
@@ -73,20 +73,20 @@
 HdrHistogram-2.1.12.jar
 jackson-annotations-2.13.2.jar
 micrometer-core-1.9.0.jar
-shiro-config-ogdl-1.9.0.jar
+shiro-config-ogdl-1.9.1.jar
 geode-log4j-0.0.0.jar
 lucene-analyzers-phonetic-6.6.6.jar
 spring-context-5.3.20.jar
-jetty-security-9.4.46.v20220331.jar
+jetty-security-9.4.47.v20220610.jar
 geode-logging-0.0.0.jar
 commons-io-2.11.0.jar
-shiro-lang-1.9.0.jar
+shiro-lang-1.9.1.jar
 javax.transaction-api-1.3.jar
 geode-common-0.0.0.jar
 antlr-2.7.7.jar
-jetty-xml-9.4.46.v20220331.jar
+jetty-xml-9.4.47.v20220610.jar
 geode-rebalancer-0.0.0.jar
-jetty-server-9.4.46.v20220331.jar
+jetty-server-9.4.47.v20220610.jar
 jackson-datatype-jsr310-2.13.2.jar
 jackson-datatype-joda-2.13.2.jar
 joda-time-2.10.14.jar
\ No newline at end of file
diff --git a/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/WANTestBase.java b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/WANTestBase.java
index ce7d19a..89daabc 100644
--- a/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/WANTestBase.java
+++ b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/WANTestBase.java
@@ -917,6 +917,13 @@
     createCache(true, locPort);
   }
 
+  public static void createCacheConserveSocketsInVMs(Boolean conserveSockets, Integer locPort,
+      VM... vms) {
+    for (VM vm : vms) {
+      vm.invoke(() -> createCacheConserveSockets(conserveSockets, locPort));
+    }
+  }
+
   public static void createCacheConserveSockets(Boolean conserveSockets, Integer locPort) {
     WANTestBase test = new WANTestBase();
     Properties props = test.getDistributedSystemProperties();
diff --git a/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDistributedTest.java b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDistributedTest.java
index bc7ea22..4e3e485 100644
--- a/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDistributedTest.java
+++ b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderOperationsDistributedTest.java
@@ -59,6 +59,7 @@
 import org.apache.geode.cache.wan.GatewayEventFilter;
 import org.apache.geode.cache.wan.GatewaySender;
 import org.apache.geode.distributed.internal.ClusterDistributionManager;
+import org.apache.geode.distributed.internal.ClusterOperationExecutors;
 import org.apache.geode.distributed.internal.DistributionMessage;
 import org.apache.geode.distributed.internal.DistributionMessageObserver;
 import org.apache.geode.internal.cache.BucketRegion;
@@ -347,6 +348,66 @@
   }
 
   /**
+   * Verifies that no distributed deadlock occurs when a gateway sender is stopped while it is
+   * receiving traffic.
+   * The deadlock may occur when the gateway sender tries to get the size of its queue (sending a
+   * size message to other members) while holding the lifeCycleLock; that lock is also taken when
+   * an event is to be distributed by the gateway sender.
+   * As this issue has only been observed in the field under heavy traffic, reproducing it in a
+   * test case requires setting conserve-sockets to true (although the deadlock has also been seen
+   * with conserve-sockets=false), shrinking the PartitionedRegion thread pool, and adding an
+   * artificial sleep at a point in the distribute() call of the AbstractGatewaySender class.
+   */
+  @Test
+  public void testNoDistributedDeadlockWithGatewaySenderStop() throws Exception {
+    addIgnoredException("Broken pipe");
+    Integer[] locatorPorts = createLNAndNYLocators();
+    Integer lnPort = locatorPorts[0];
+    Integer nyPort = locatorPorts[1];
+    VM[] senders = {vm4, vm5, vm6, vm7};
+    try {
+      for (VM sender : senders) {
+        sender.invoke(() -> AbstractGatewaySender.doSleepForTestingInDistribute.set(true));
+        sender.invoke(() -> ClusterOperationExecutors.maxPrThreadsForTest.set(2));
+      }
+      vm2.invoke(() -> ClusterOperationExecutors.maxPrThreadsForTest.set(2));
+      vm3.invoke(() -> ClusterOperationExecutors.maxPrThreadsForTest.set(2));
+
+      createSendersReceiversAndPartitionedRegion(lnPort, nyPort, false, true, true);
+
+      // make sure all the senders are running before doing any puts
+      waitForSendersRunning();
+
+      // Send a fairly big amount of operations to provoke the deadlock
+      int invocationsPerServer = 4;
+      AsyncInvocation[] invocations = new AsyncInvocation[senders.length * invocationsPerServer];
+      for (int i = 0; i < senders.length; i++) {
+        for (int j = 0; j < invocationsPerServer; j++) {
+          invocations[i * invocationsPerServer + j] =
+              senders[i].invokeAsync(() -> doPuts(getUniqueName() + "_PR", 100));
+        }
+      }
+
+      // Wait for some elements to be replicated before stopping the senders
+      for (int i = 0; i < senders.length; i++) {
+        senders[i].invoke(() -> await()
+            .untilAsserted(() -> assertThat(getSenderStats("ln", -1).get(3)).isGreaterThan(1)));
+      }
+
+      stopSendersAsync();
+      for (int i = 0; i < invocations.length; i++) {
+        invocations[i].await();
+      }
+    } finally {
+      for (int i = 0; i < senders.length; i++) {
+        senders[i].invoke(() -> AbstractGatewaySender.doSleepForTestingInDistribute.set(false));
+      }
+    }
+  }
+
+  /**
    * Normal scenario in which a sender is stopped and then started again.
    */
   @Test
@@ -1271,7 +1332,13 @@
 
   private void createSendersReceiversAndPartitionedRegion(Integer lnPort, Integer nyPort,
       boolean createAccessors, boolean startSenders) {
-    createSendersAndReceivers(lnPort, nyPort);
+    createSendersReceiversAndPartitionedRegion(lnPort, nyPort, createAccessors, startSenders,
+        false);
+  }
+
+  private void createSendersReceiversAndPartitionedRegion(Integer lnPort, Integer nyPort,
+      boolean createAccessors, boolean startSenders, boolean conserveSockets) {
+    createSendersAndReceivers(lnPort, nyPort, conserveSockets);
 
     createPartitionedRegions(createAccessors);
 
@@ -1280,11 +1347,11 @@
     }
   }
 
-  private void createSendersAndReceivers(Integer lnPort, Integer nyPort) {
-    createCacheInVMs(nyPort, vm2, vm3);
+  private void createSendersAndReceivers(Integer lnPort, Integer nyPort, boolean conserveSockets) {
+    createCacheConserveSocketsInVMs(conserveSockets, nyPort, vm2, vm3);
     createReceiverInVMs(vm2, vm3);
 
-    createCacheInVMs(lnPort, vm4, vm5, vm6, vm7);
+    createCacheConserveSocketsInVMs(conserveSockets, lnPort, vm4, vm5, vm6, vm7);
 
     vm4.invoke(() -> createSender("ln", 2, true, 100, 10, false, false, null, true));
     vm5.invoke(() -> createSender("ln", 2, true, 100, 10, false, false, null, true));
@@ -1578,6 +1645,17 @@
     vm7.invoke(() -> stopSender("ln"));
   }
 
+  private void stopSendersAsync() throws InterruptedException {
+    AsyncInvocation inv1 = vm4.invokeAsync(() -> stopSender("ln"));
+    AsyncInvocation inv2 = vm5.invokeAsync(() -> stopSender("ln"));
+    AsyncInvocation inv3 = vm6.invokeAsync(() -> stopSender("ln"));
+    AsyncInvocation inv4 = vm7.invokeAsync(() -> stopSender("ln"));
+    inv1.await();
+    inv2.await();
+    inv3.await();
+    inv4.await();
+  }
+
   private void waitForSendersRunning() {
     vm4.invoke(() -> waitForSenderRunningState("ln"));
     vm5.invoke(() -> waitForSenderRunningState("ln"));
diff --git a/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/serial/InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.java b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/serial/InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.java
new file mode 100644
index 0000000..c940ade
--- /dev/null
+++ b/geode-wan/src/distributedTest/java/org/apache/geode/internal/cache/wan/serial/InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.geode.internal.cache.wan.serial;
+
+import static org.apache.geode.distributed.ConfigurationProperties.DISTRIBUTED_SYSTEM_ID;
+import static org.apache.geode.distributed.ConfigurationProperties.REMOTE_LOCATORS;
+import static org.apache.geode.internal.cache.wan.wancommand.WANCommandUtils.validateGatewaySenderMXBeanProxy;
+import static org.apache.geode.internal.cache.wan.wancommand.WANCommandUtils.verifySenderState;
+import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.Serializable;
+import java.util.AbstractMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.EntryEvent;
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.RegionEvent;
+import org.apache.geode.cache.wan.GatewayReceiver;
+import org.apache.geode.cache.wan.GatewaySender;
+import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
+import org.apache.geode.internal.cache.InternalRegion;
+import org.apache.geode.internal.cache.RegionQueue;
+import org.apache.geode.internal.cache.wan.AbstractGatewaySender;
+import org.apache.geode.management.internal.cli.util.CommandStringBuilder;
+import org.apache.geode.management.internal.i18n.CliStrings;
+import org.apache.geode.test.dunit.VM;
+import org.apache.geode.test.dunit.rules.ClientVM;
+import org.apache.geode.test.dunit.rules.ClusterStartupRule;
+import org.apache.geode.test.dunit.rules.MemberVM;
+import org.apache.geode.test.junit.categories.WanTest;
+import org.apache.geode.test.junit.rules.GfshCommandRule;
+
+@Category({WanTest.class})
+public class InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest
+    implements Serializable {
+
+  @Rule
+  public ClusterStartupRule clusterStartupRule = new ClusterStartupRule(9);
+
+  @Rule
+  public transient GfshCommandRule gfsh = new GfshCommandRule();
+
+  public static volatile boolean ENTRY_CONFLICT_WINNER_HAS_REACHED_THE_REDUNDANT_SERVER;
+
+  private MemberVM locator1Site2;
+
+  private MemberVM server1Site1;
+  private MemberVM server2Site1;
+
+  private MemberVM server1Site2;
+  private MemberVM server2Site2;
+
+  private int server1Site2Port;
+  private int server2Site2Port;
+
+  private ClientVM clientConnectedToServer1Site2;
+  private ClientVM clientConnectedToServer2Site2;
+
+  private static final String DISTRIBUTED_SYSTEM_ID_SITE1 = "1";
+  private static final String DISTRIBUTED_SYSTEM_ID_SITE2 = "2";
+  private static final String REGION_NAME = "test1";
+
+  private static final String GATEWAY_SENDER_ID = "ln";
+
+  private final Map.Entry<Integer, Integer> ENTRY_INITIAL = new AbstractMap.SimpleEntry<>(1, 0);
+  private final Map.Entry<Integer, Integer> ENTRY_CONFLICT_RESOLUTION_WINNER =
+      new AbstractMap.SimpleEntry<>(1, 1);
+  private final Map.Entry<Integer, Integer> ENTRY_CONFLICT_RESOLUTION_LOSER =
+      new AbstractMap.SimpleEntry<>(1, 2);
+
+  @Before
+  public void setupMultiSite() throws Exception {
+    Properties props = new Properties();
+    props.setProperty(DISTRIBUTED_SYSTEM_ID, DISTRIBUTED_SYSTEM_ID_SITE1);
+    MemberVM locator1Site1 = clusterStartupRule.startLocatorVM(0, props);
+    MemberVM locator2Site1 = clusterStartupRule.startLocatorVM(1, props, locator1Site1.getPort());
+
+    // start servers for site #1
+    server1Site1 =
+        clusterStartupRule.startServerVM(2, locator1Site1.getPort(), locator2Site1.getPort());
+    server2Site1 =
+        clusterStartupRule.startServerVM(3, locator1Site1.getPort(), locator2Site1.getPort());
+    connectGfshToSite(locator1Site1);
+
+    // create replicate region on site #1
+    CommandStringBuilder regionCmd = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    regionCmd.addOption(CliStrings.CREATE_REGION__REGION, REGION_NAME);
+    regionCmd.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+
+    gfsh.executeAndAssertThat(regionCmd.toString()).statusIsSuccess();
+
+    String csb = new CommandStringBuilder(CliStrings.CREATE_GATEWAYRECEIVER)
+        .addOption(CliStrings.CREATE_GATEWAYRECEIVER__BINDADDRESS, "localhost")
+        .getCommandString();
+
+    gfsh.executeAndAssertThat(csb).statusIsSuccess();
+
+    server1Site1.invoke(
+        InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest::verifyReceiverState);
+    server2Site1.invoke(
+        InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest::verifyReceiverState);
+
+    props.setProperty(DISTRIBUTED_SYSTEM_ID, DISTRIBUTED_SYSTEM_ID_SITE2);
+    props.setProperty(REMOTE_LOCATORS,
+        "localhost[" + locator1Site1.getPort() + "],localhost[" + locator2Site1.getPort() + "]");
+    locator1Site2 = clusterStartupRule.startLocatorVM(5, props);
+
+    // start servers for site #2
+    server1Site2 = clusterStartupRule.startServerVM(6, locator1Site2.getPort());
+    server2Site2 = clusterStartupRule.startServerVM(7, locator1Site2.getPort());
+
+    server2Site2Port = server2Site2.getPort();
+    server1Site2Port = server1Site2.getPort();
+
+    // create gateway-sender on site #2
+    connectGfshToSite(locator1Site2);
+    String command = new CommandStringBuilder(CliStrings.CREATE_GATEWAYSENDER)
+        .addOption(CliStrings.MEMBERS, server2Site2.getName())
+        .addOption(CliStrings.CREATE_GATEWAYSENDER__ID, GATEWAY_SENDER_ID)
+        .addOption(CliStrings.CREATE_GATEWAYSENDER__REMOTEDISTRIBUTEDSYSTEMID, "1")
+        .addOption(CliStrings.CREATE_GATEWAYSENDER__PARALLEL, "false")
+        .addOption(CliStrings.CREATE_GATEWAYSENDER__ENABLEBATCHCONFLATION, "true")
+        .getCommandString();
+    gfsh.executeAndAssertThat(command).statusIsSuccess();
+
+    verifyGatewaySenderState(server2Site2, false);
+
+    executeGatewaySenderActionCommandSite2(CliStrings.PAUSE_GATEWAYSENDER);
+
+    // create replicate region on site #2
+    regionCmd = new CommandStringBuilder(CliStrings.CREATE_REGION);
+    regionCmd.addOption(CliStrings.CREATE_REGION__REGION, REGION_NAME);
+    regionCmd.addOption(CliStrings.CREATE_REGION__REGIONSHORTCUT, "REPLICATE");
+    regionCmd.addOption(CliStrings.CREATE_REGION__GATEWAYSENDERID, GATEWAY_SENDER_ID);
+    gfsh.executeAndAssertThat(regionCmd.toString()).statusIsSuccess();
+  }
+
+  @Test
+  public void testEventIsNotConflatedWhenConcurrentModificationIsDetected() throws Exception {
+    startClientToServer1Site2(server1Site2Port);
+    startClientToServer2Site2(server2Site2Port);
+
+    clientConnectedToServer2Site2.invoke(() -> executePutOperation(ENTRY_INITIAL));
+    waitUntilEventIsConsistentlyReplicatedAcrossServers(ENTRY_INITIAL, server1Site2, server2Site2);
+
+    // Configure cache writer on server to delay writing of entry in order to provoke
+    // the internal conflict
+    server1Site2.invoke(() -> {
+      InternalRegion region =
+          ClusterStartupRule.getCache().getInternalRegionByPath("/" + REGION_NAME);
+      region.getAttributesMutator().setCacheWriter(new TestCacheWriterDelayWritingOfEntry(
+          ENTRY_CONFLICT_RESOLUTION_WINNER, ENTRY_CONFLICT_RESOLUTION_LOSER));
+    });
+
+    clientConnectedToServer2Site2.invokeAsync(() -> executePutOperation(
+        ENTRY_CONFLICT_RESOLUTION_WINNER));
+
+    server1Site2.invoke(() -> await().untilAsserted(() -> assertThat(
+        InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.ENTRY_CONFLICT_WINNER_HAS_REACHED_THE_REDUNDANT_SERVER)
+            .isTrue()));
+
+    clientConnectedToServer1Site2.invokeAsync(() -> executePutOperation(
+        ENTRY_CONFLICT_RESOLUTION_LOSER));
+
+    // Check that expected entry has won the internal conflict resolution
+    waitUntilEventIsConsistentlyReplicatedAcrossServers(ENTRY_CONFLICT_RESOLUTION_WINNER,
+        server1Site2,
+        server2Site2);
+
+    server2Site2.invoke(
+        InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest::awaitQueueSize);
+    executeGatewaySenderActionCommandSite2(CliStrings.RESUME_GATEWAYSENDER);
+
+    // check that expected event is replicated to the remote cluster
+    waitUntilEventIsConsistentlyReplicatedAcrossServers(ENTRY_CONFLICT_RESOLUTION_WINNER,
+        server1Site1,
+        server2Site1);
+  }
+
+  private void waitUntilEventIsConsistentlyReplicatedAcrossServers(
+      final Map.Entry<Integer, Integer> entry,
+      MemberVM... servers) {
+    await().untilAsserted(() -> verifyEventIsConsistentlyReplicatedAcrossServers(entry, servers));
+  }
+
+  private static void verifyEventIsConsistentlyReplicatedAcrossServers(
+      final Map.Entry<Integer, Integer> entry,
+      MemberVM... servers) {
+    for (MemberVM server : servers) {
+      assertThat(server.invoke(() -> doesEventExistOnServer(entry))).isTrue();
+    }
+  }
+
+  private static boolean doesEventExistOnServer(Map.Entry<Integer, Integer> entry) {
+    Region<Integer, Integer> region =
+        ClusterStartupRule.getCache().getRegion("/" + REGION_NAME);
+    return Objects.equals(region.get(entry.getKey()), entry.getValue());
+  }
+
+  private void executeGatewaySenderActionCommandSite2(final String action) throws Exception {
+    connectGfshToSite(locator1Site2);
+    CommandStringBuilder regionCmd = new CommandStringBuilder(action);
+    regionCmd.addOption(CliStrings.MEMBERS, server2Site2.getName());
+    regionCmd.addOption(CliStrings.PAUSE_GATEWAYSENDER__ID, GATEWAY_SENDER_ID);
+    gfsh.executeAndAssertThat(regionCmd.toString()).statusIsSuccess();
+
+    verifyGatewaySenderState(server2Site2, CliStrings.PAUSE_GATEWAYSENDER.equals(action));
+  }
+
+  private void executePutOperation(Map.Entry<Integer, Integer> entry) {
+    Region<Integer, Integer> region =
+        ClusterStartupRule.clientCacheRule.getCache().getRegion(REGION_NAME);
+    region.put(entry.getKey(), entry.getValue());
+  }
+
+  private static void awaitQueueSize() {
+    await()
+        .untilAsserted(() -> validateQueueSize(
+            InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.GATEWAY_SENDER_ID,
+            3));
+  }
+
+  private static void validateQueueSize(String senderId, int numQueueEntries) {
+    GatewaySender sender = ClusterStartupRule.getCache().getGatewaySender(senderId);
+    Set<RegionQueue> queues = ((AbstractGatewaySender) sender).getQueues();
+    int size = 0;
+    for (RegionQueue q : queues) {
+      size += q.size();
+    }
+    assertThat(size).isEqualTo(numQueueEntries);
+  }
+
+  private static void verifyReceiverState() {
+    Set<GatewayReceiver> receivers = ClusterStartupRule.getCache().getGatewayReceivers();
+    for (GatewayReceiver receiver : receivers) {
+      assertThat(receiver.isRunning()).isTrue();
+    }
+  }
+
+  private void verifyGatewaySenderState(MemberVM memberVM, boolean isPaused) {
+    memberVM.invoke(() -> verifySenderState(GATEWAY_SENDER_ID, true, isPaused));
+    locator1Site2.invoke(
+        () -> validateGatewaySenderMXBeanProxy(getMember(memberVM.getVM()), GATEWAY_SENDER_ID, true,
+            isPaused));
+  }
+
+  private static InternalDistributedMember getMember(final VM vm) {
+    return vm.invoke(() -> ClusterStartupRule.getCache().getMyId());
+  }
+
+  private void startClientToServer1Site2(final int serverPort) throws Exception {
+    clientConnectedToServer1Site2 =
+        clusterStartupRule.startClientVM(8, c -> c.withServerConnection(serverPort));
+    clientConnectedToServer1Site2.invoke(() -> {
+      ClusterStartupRule.clientCacheRule.createProxyRegion(REGION_NAME);
+    });
+  }
+
+  private void startClientToServer2Site2(final int serverPort) throws Exception {
+    clientConnectedToServer2Site2 =
+        clusterStartupRule.startClientVM(4, c -> c.withServerConnection(serverPort));
+    clientConnectedToServer2Site2.invoke(() -> {
+      ClusterStartupRule.clientCacheRule.createProxyRegion(REGION_NAME);
+    });
+  }
+
+  private void connectGfshToSite(MemberVM locator) throws Exception {
+    if (gfsh.isConnected()) {
+      gfsh.disconnect();
+    }
+    gfsh.connectAndVerify(locator);
+  }
+
+  public static class TestCacheWriterDelayWritingOfEntry<K, V> implements CacheWriter<K, V> {
+    private final Map.Entry<Integer, Integer> entryToDelay;
+
+    private final Map.Entry<Integer, Integer> waitUntilEntry;
+
+    public TestCacheWriterDelayWritingOfEntry(Map.Entry<Integer, Integer> entryToDelay,
+        Map.Entry<Integer, Integer> waitUntilEntry) {
+      this.entryToDelay = entryToDelay;
+      this.waitUntilEntry = waitUntilEntry;
+    }
+
+    @Override
+    public void beforeUpdate(EntryEvent<K, V> event) throws CacheWriterException {
+      Region<Integer, Integer> region = ClusterStartupRule.getCache().getRegion("/" + REGION_NAME);
+      int value = (Integer) event.getNewValue();
+      int key = (Integer) event.getKey();
+      if (key == entryToDelay.getKey() && value == entryToDelay.getValue()) {
+        InternalConflictResolutionReplicateRegionWithSerialGwsDistributedTest.ENTRY_CONFLICT_WINNER_HAS_REACHED_THE_REDUNDANT_SERVER =
+            true;
+        await().untilAsserted(() -> assertThat(region.get(waitUntilEntry.getKey()))
+            .isEqualTo(waitUntilEntry.getValue()));
+      }
+    }
+
+    @Override
+    public void beforeCreate(EntryEvent<K, V> event) throws CacheWriterException {}
+
+    @Override
+    public void beforeDestroy(EntryEvent<K, V> event) throws CacheWriterException {}
+
+    @Override
+    public void beforeRegionDestroy(RegionEvent<K, V> event) throws CacheWriterException {}
+
+    @Override
+    public void beforeRegionClear(RegionEvent<K, V> event) throws CacheWriterException {}
+  }
+}
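
The scenario in the new conflict-resolution test hinges on batch conflation: with enable-batch-conflation set, a serial sender may collapse several queued updates for the same key into the newest one, which is only safe if the newest queued value is also the one that won conflict resolution. A toy sketch of key-based conflation follows; it is illustrative only and is not Geode's queue implementation.

// Toy key-based conflation over queued (key, value) events: a later update
// for the same key replaces the earlier one in the batch. The hazard the
// test guards against: if conflict resolution decided value 1 wins, blindly
// conflating to the last arrival (value 2) would replicate the loser.
import java.util.LinkedHashMap;
import java.util.Map;

public class ConflationSketch {
  public static void main(String[] args) {
    int[][] events = {{1, 0}, {1, 1}, {1, 2}}; // arrival order, same key
    Map<Integer, Integer> batch = new LinkedHashMap<>();
    for (int[] e : events) {
      batch.put(e[0], e[1]); // conflate: newest value per key survives
    }
    System.out.println(batch); // {1=2}: only the last arrival is shipped
  }
}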
diff --git a/gradle.properties b/gradle.properties
index bbf7c8e..2c72e62 100755
--- a/gradle.properties
+++ b/gradle.properties
@@ -26,7 +26,7 @@
 #   <blank>   - release
 #
 # The full version string consists of 'versionNumber + releaseQualifier + releaseType'
-version = 1.15.0-build.0
+version = 1.15.1-build.0
 
 # Default Maven targets
 mavenSnapshotUrl = gcs://maven.apachegeode-ci.info/snapshots
diff --git a/settings.gradle b/settings.gradle
index 74b2578..c4d9f38 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -109,7 +109,8 @@
  '1.13.1',
  '1.13.8',
  '1.14.0', // Include for SSL protocol configuration changes in 1.14.0
- '1.14.4'].each {
+ '1.14.4',
+ '1.15.0'].each {
   include 'geode-old-versions:'.concat(it)
 }