GEODE-8778: Change conserve-sockets default value to false (#5832)

- Update certain tests to explicitly set conserve-sockets=true to allow
them to pass
- Update docs to reflect new default
- Add ignored exception to test instead of explicitly setting conserve-sockets
- Add comment to explain use of conserve-sockets=true in test

Authored-by: Donal Evans <doevans@vmware.com>
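
For applications that relied on the previous default, a minimal sketch (illustrative only, not part of this patch) of opting back in to shared sockets by setting the property explicitly on the `CacheFactory`; the class name and locator address are placeholders.

```java
import static org.apache.geode.distributed.ConfigurationProperties.CONSERVE_SOCKETS;
import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;

public class ConserveSocketsOptIn {
  public static void main(String[] args) {
    // With this change, omitting conserve-sockets now yields false; set it
    // explicitly to keep the old shared-socket behavior.
    Cache cache = new CacheFactory()
        .set(LOCATORS, "localhost[10334]") // placeholder locator address
        .set(CONSERVE_SOCKETS, "true")
        .create();
    try {
      // ... application work ...
    } finally {
      cache.close();
    }
  }
}
```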
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
index 3e3ab70..78a9a89 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/distributed/internal/ClusterDistributionManagerDUnitTest.java
@@ -294,6 +294,8 @@
     Region<String, String> region = regionFactory.create("testRegion");
 
     addIgnoredException("elapsed while waiting for replies");
+    // Ignore logging from Connection.doSevereAlertProcessing()
+    addIgnoredException("seconds have elapsed waiting for a response from");
     vm1.invoke("Connect to distributed system", () -> {
       config.setProperty(NAME, "sleeper");
       getSystem(config);
@@ -345,6 +347,8 @@
     Region<String, String> region = regionFactory.create("testRegion");
 
     addIgnoredException("sec have elapsed while waiting for replies");
+    // Ignore logging from Connection.doSevereAlertProcessing()
+    addIgnoredException("seconds have elapsed waiting for a response from");
 
     vm1.invoke(new SerializableRunnable("Connect to distributed system") {
       @Override
diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
index 78a5153..cdb5432 100644
--- a/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
+++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/tcp/CloseConnectionTest.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.tcp;
 
+import static org.apache.geode.distributed.ConfigurationProperties.CONSERVE_SOCKETS;
 import static org.apache.geode.test.awaitility.GeodeAwaitility.await;
 import static org.apache.geode.test.dunit.Disconnect.disconnectAllFromDS;
 import static org.apache.geode.test.dunit.VM.getController;
@@ -40,14 +41,19 @@
 import org.apache.geode.test.dunit.rules.DistributedRule;
 
 public class CloseConnectionTest implements Serializable {
+  private static final long serialVersionUID = 3692493564204797623L;
   private VM vm0;
   private VM vm1;
 
   @Rule
   public DistributedRule distributedRule = new DistributedRule();
 
+  /**
+   * The test case in this class requires that conserve-sockets be set to true in order for
+   * connections to be shared.
+   */
   @Rule
-  public CacheRule cacheRule = new CacheRule();
+  public CacheRule cacheRule = new CacheRule.Builder().addConfig(CONSERVE_SOCKETS, "true").build();
 
   @Before
   public void setUp() {
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java b/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
index 22a8fbc..1955210 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/ConfigurationProperties.java
@@ -338,7 +338,7 @@
    * "false" then every application thread that sends distribution messages to other members of the
    * distributed system will own its own sockets and have exclusive access to them. The length of
    * time a thread can have exclusive access to a socket can be configured with "socket-lease-time".
-   * <U>Default</U>: "true"
+   * <U>Default</U>: "false"
    * </p>
    * <U>Allowed values</U>: true|false
    * </p>
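
As an illustration of the javadoc above (a sketch, not code from this patch), the property and the related socket-lease-time can also be supplied programmatically; the class and method names are hypothetical, and the 60000 ms lease shown is simply the documented default.

```java
import static org.apache.geode.distributed.ConfigurationProperties.CONSERVE_SOCKETS;
import static org.apache.geode.distributed.ConfigurationProperties.SOCKET_LEASE_TIME;

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;

public class SocketOwnershipConfig {
  public static Cache createWithPerThreadSockets() {
    Properties config = new Properties();
    // New default: each thread that sends distribution messages owns its own sockets.
    config.setProperty(CONSERVE_SOCKETS, "false");
    // How long (milliseconds) a thread can keep exclusive access to an idle socket.
    config.setProperty(SOCKET_LEASE_TIME, "60000");
    return new CacheFactory(config).create();
  }
}
```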
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
index 9aad592..6f6f523 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionConfig.java
@@ -1911,7 +1911,7 @@
   /**
    * The default value of the {@link ConfigurationProperties#CONSERVE_SOCKETS} property
    */
-  boolean DEFAULT_CONSERVE_SOCKETS = true;
+  boolean DEFAULT_CONSERVE_SOCKETS = false;
 
   /**
    * Returns the value of the {@link ConfigurationProperties#ROLES} property
diff --git a/geode-docs/managing/logging/logging_categories.html.md.erb b/geode-docs/managing/logging/logging_categories.html.md.erb
index 9ed450e..66b0854 100644
--- a/geode-docs/managing/logging/logging_categories.html.md.erb
+++ b/geode-docs/managing/logging/logging_categories.html.md.erb
@@ -211,7 +211,7 @@
     bind-address=""
     cache-xml-file="cache.xml"
     conflate-events="server"
-    conserve-sockets="true"
+    conserve-sockets="false"
       ...
     socket-buffer-size="32768"
     socket-lease-time="60000"
diff --git a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
index 1c52ed5..644c0f3 100644
--- a/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
+++ b/geode-docs/managing/monitor_tune/socket_communication_have_enough_sockets.html.md.erb
@@ -30,7 +30,7 @@
 - **Peer-to-peer**. You can configure whether your members share sockets both at the application
 level and at the thread level. To enable sharing at the application level, set the
 `gemfire.properties` property `conserve-sockets` to `true`. To achieve maximum throughput, however, we
-recommend that you set `conserve-sockets` to `false`.
+recommend that you use the default value of `false`.
 
     At the thread level, developers can override this setting by using the DistributedSystem API method `setThreadsSocketPolicy`. You might want to enable socket sharing at the application level and then have threads that do a lot of cache work take sole ownership of their sockets. Make sure to program these threads to release their sockets as soon as possible using the `releaseThreadsSockets` method, rather than waiting for a timeout or thread death.
 
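A sketch (not from this patch) of the thread-level override described above, using the `setThreadsSocketPolicy` and `releaseThreadsSockets` methods the docs name; the surrounding class and method are hypothetical.

```java
import org.apache.geode.distributed.DistributedSystem;

public class BulkCacheWorker {
  void runWithOwnSockets(Runnable cacheWork) {
    // Let this thread take sole ownership of its sockets for a burst of cache work,
    // even when the member itself runs with conserve-sockets=true.
    DistributedSystem.setThreadsSocketPolicy(false);
    try {
      cacheWork.run();
    } finally {
      // Release the sockets as soon as the work is done rather than waiting
      // for a timeout or thread death, as the docs recommend.
      DistributedSystem.releaseThreadsSockets();
    }
  }
}
```
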
diff --git a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
index 8e1e587..b21594d 100644
--- a/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
+++ b/geode-docs/managing/monitor_tune/sockets_and_gateways.html.md.erb
@@ -73,7 +73,7 @@
     ```
 
 **Note:**
-WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for GemFire members that participate in a WAN deployment.
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of <code class="ph codeph">conserve-sockets=false</code> for <%=vars.product_name%> members that participate in a WAN deployment.
 
 ## <a id="socket_comm__section_4A7C60D4471A4339884AA5AAC97B4DAA" class="no-quick-link"></a>Multi-site (WAN) Socket Requirements
 
diff --git a/geode-docs/reference/topics/gemfire_properties.html.md.erb b/geode-docs/reference/topics/gemfire_properties.html.md.erb
index 8de4d71..3386423 100644
--- a/geode-docs/reference/topics/gemfire_properties.html.md.erb
+++ b/geode-docs/reference/topics/gemfire_properties.html.md.erb
@@ -129,9 +129,9 @@
 </tr>
 <tr>
 <td>conserve-sockets</td>
-<td>Specifies whether sockets are shared by the system member’s threads. If true, threads share, and a minimum number of sockets are used to connect to the cluster. If false, every application thread has its own sockets for distribution purposes. You can override this setting for individual threads inside your application. Where possible, it is better to set conserve-sockets to true and enable the use of specific extra sockets in the application code if needed. WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set <code class="ph codeph">conserve-sockets=false</code> for <%=vars.product_name%> members that participate in a WAN deployment.</td>
+<td>Specifies whether sockets are shared by the system member’s threads. If true, threads share, and a minimum number of sockets are used to connect to the cluster. If false, every application thread has its own sockets for distribution purposes. You can override this setting for individual threads inside your application. WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of <code class="ph codeph">conserve-sockets=false</code> for <%=vars.product_name%> members that participate in a WAN deployment.</td>
 <td>S, L</td>
-<td>true</td>
+<td>false</td>
 </tr>
 <tr>
 <td>delta-propagation</td>
diff --git a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
index ba44676..bf6dfd2 100644
--- a/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
+++ b/geode-docs/reference/topics/memory_requirements_for_cache_data.html.md.erb
@@ -224,16 +224,16 @@
 
 Servers always maintain two outgoing connections to each of their peers. So for each peer a server has, there are four total connections: two going out to the peer and two coming in from the peer.
 
-The server threads that service client requests also communicate with peers to distribute events and forward client requests. If the server's <%=vars.product_name%> connection property *conserve-sockets* is set to true (the default), these threads use the already-established peer connections for this communication.
+The server threads that service client requests also communicate with peers to distribute events and forward client requests. If the server's <%=vars.product_name%> connection property *conserve-sockets* is set to true, these threads use the already-established peer connections for this communication.
 
-If *conserve-sockets* is false, each thread that services clients establishes two of its own individual connections to its server peers, one to send, and one to receive. Each socket uses a file descriptor, so the number of available sockets is governed by two operating system settings:
+If *conserve-sockets* is false (the default), each thread that services clients establishes two of its own individual connections to its server peers, one to send, and one to receive. Each socket uses a file descriptor, so the number of available sockets is governed by two operating system settings:
 
 -   maximum open files allowed on the system as a whole
 -   maximum open files allowed for each session
 
 In servers with many threads servicing clients, if *conserve-sockets* is set to false, the demand for connections can easily overrun the number of available sockets. Even with *conserve-sockets* set to false, you can cap the number of these connections by setting the server's *max-threads* parameter.
 
-Since each client connection takes one server socket on a thread to handle the connection, and since that server acts as a proxy on partitioned regions to get results, or execute the function service on behalf of the client, for partitioned regions, if conserve sockets is set to false, this also results in a new socket on the server being opened to each peer. Thus N sockets are opened, where N is the number of peers. Large number of clients simultaneously connecting to a large set of peers with a partitioned region with conserve sockets set to false can cause a huge amount of memory to be consumed by socket. Set conserve-sockets to true in these instances.
+Since each client connection takes one server socket on a thread to handle the connection, and since that server acts as a proxy on partitioned regions to get results or execute the function service on behalf of the client, if conserve-sockets is set to false, this also results in a new socket on the server being opened to each peer. Thus N sockets are opened, where N is the number of peers. A large number of clients simultaneously connecting to a large set of peers with a partitioned region with conserve-sockets set to false can cause a large amount of memory to be consumed by sockets.
 
 **Note:**
 There is also JVM overhead for the thread stack for each client connection being processed, set at 256KB or 512KB for most JVMs . On some JVMs you can reduce it to 128KB. You can use the <%=vars.product_name%> `max-threads` property or the <%=vars.product_name%> `max-connections` property to limit the number of client threads and thus both thread overhead and socket overhead.
@@ -262,15 +262,15 @@
 <td>= (lesser of max-threads property on server or max-connections)* (socket buffer size +thread overhead for the JVM )</td>
 </tr>
 <tr>
-<td>Per member of the cluster if conserve sockets is set to true</td>
+<td>Per member of the cluster if conserve-sockets is set to true</td>
 <td>4* number of peers</td>
 </tr>
 <tr>
-<td>Per member, if conserve sockets is set to false</td>
+<td>Per member, if conserve-sockets is set to false</td>
 <td>4 * number of peers hosting that region* number of threads</td>
 </tr>
 <tr>
-<td>If member hosts a Partitioned Region, If conserve sockets set to false and it is a Server (this is cumulative with the above)</td>
+<td>If the member hosts a Partitioned Region, conserve-sockets is set to false, and it is a Server (this is cumulative with the above)</td>
 <td><p>=&lt; max-threads * 2 * number of peers</p>
 <div class="note note">
 <b>Note:</b>
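
To make the connection counts in the table above concrete, a back-of-the-envelope sketch (hypothetical code, not a Geode API) that plugs example numbers into those formulas.

```java
public class PeerSocketEstimate {
  // conserve-sockets=true: 4 * number of peers
  static long conserved(int peers) {
    return 4L * peers;
  }

  // conserve-sockets=false: 4 * number of peers hosting that region * number of threads
  static long perThread(int peersHostingRegion, int threads) {
    return 4L * peersHostingRegion * threads;
  }

  // Partitioned Region on a server with conserve-sockets=false, cumulative upper bound:
  // max-threads * 2 * number of peers
  static long partitionedRegionServerExtra(int maxThreads, int peers) {
    return 2L * maxThreads * peers;
  }

  public static void main(String[] args) {
    // Hypothetical deployment: 10 peers all hosting the region, 64 threads, max-threads=100.
    System.out.println(conserved(10));                            // 40
    System.out.println(perThread(10, 64));                        // 2560
    System.out.println(perThread(10, 64)
        + partitionedRegionServerExtra(100, 10));                 // 4560
  }
}
```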
diff --git a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
index d3bfa4a..d7ba5e4 100644
--- a/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
+++ b/geode-docs/topologies_and_comm/multi_site_configuration/setting_up_a_multisite_system.html.md.erb
@@ -25,7 +25,7 @@
 
 Before you start, you should understand how to configure membership and communication in peer-to-peer systems using locators. See [Configuring Peer-to-Peer Discovery](../p2p_configuration/setting_up_a_p2p_system.html) and [Configuring Peer Communication](../p2p_configuration/setting_up_peer_communication.html).
 
-WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always set `conserve-sockets=false` for <%=vars.product_name%> members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
+WAN deployments increase the messaging demands on a <%=vars.product_name%> system. To avoid hangs related to WAN messaging, always use the default setting of <code class="ph codeph">conserve-sockets=false</code> for <%=vars.product_name%> members that participate in a WAN deployment. See [Configuring Sockets in Multi-Site (WAN) Deployments](../../managing/monitor_tune/sockets_and_gateways.html) and [Making Sure You Have Enough Sockets](../../managing/monitor_tune/socket_communication_have_enough_sockets.html).
 
 ## <a id="setting_up_a_multisite_system__section_86F9FE9D786D407FB438C56E43FC5DB1" class="no-quick-link"></a>Main Steps