disable groupBy config applyLimitPushDownToSegment by default (#9711) (#9713)

* disable groupBy config applyLimitPushDownToSegment by default

* document the new default and when enabling the setting can hurt performance
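
Operators who want the previous behavior back can re-enable it cluster-wide by setting druid.query.groupBy.applyLimitPushDownToSegment=true, or per query via the applyLimitPushDownToSegment context key; a short Java sketch of the per-query override follows the diff below.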
diff --git a/docs/querying/groupbyquery.md b/docs/querying/groupbyquery.md
index a9ff9b2..27323e4 100644
--- a/docs/querying/groupbyquery.md
+++ b/docs/querying/groupbyquery.md
@@ -414,7 +414,7 @@
 |`druid.query.groupBy.forceHashAggregation`|Force to use hash-based aggregation.|false|
 |`druid.query.groupBy.intermediateCombineDegree`|Number of intermediate nodes combined together in the combining tree. Higher degrees will need less threads which might be helpful to improve the query performance by reducing the overhead of too many threads if the server has sufficiently powerful cpu cores.|8|
 |`druid.query.groupBy.numParallelCombineThreads`|Hint for the number of parallel combining threads. This should be larger than 1 to turn on the parallel combining feature. The actual number of threads used for parallel combining is min(`druid.query.groupBy.numParallelCombineThreads`, `druid.processing.numThreads`).|1 (disabled)|
-|`druid.query.groupBy.applyLimitPushDownToSegment`|If Broker pushes limit down to queryable nodes (historicals, peons) then limit results during segment scan.|true (enabled)|
+|`druid.query.groupBy.applyLimitPushDownToSegment`|If the Broker pushes a limit down to queryable data servers (historicals, peons), also apply that limit during the segment scan. When a query touches a large number of segments on a data server, enabling this setting may counterintuitively reduce performance.|false (disabled)|
 
 Supported query contexts:
 
diff --git a/processing/src/main/java/org/apache/druid/query/groupby/GroupByQueryConfig.java b/processing/src/main/java/org/apache/druid/query/groupby/GroupByQueryConfig.java
index 17ca929..c31ec26 100644
--- a/processing/src/main/java/org/apache/druid/query/groupby/GroupByQueryConfig.java
+++ b/processing/src/main/java/org/apache/druid/query/groupby/GroupByQueryConfig.java
@@ -80,7 +80,7 @@
   private boolean forcePushDownLimit = false;
 
   @JsonProperty
-  private boolean applyLimitPushDownToSegment = true;
+  private boolean applyLimitPushDownToSegment = false;
 
   @JsonProperty
   private boolean forcePushDownNestedQuery = false;
diff --git a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryConfigTest.java b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryConfigTest.java
index 4cb5e80..fb0090f 100644
--- a/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryConfigTest.java
+++ b/processing/src/test/java/org/apache/druid/query/groupby/GroupByQueryConfigTest.java
@@ -56,6 +56,7 @@
     Assert.assertEquals(4, config.getMaxOnDiskStorage());
     Assert.assertEquals(5, config.getMaxMergingDictionarySize());
     Assert.assertEquals(6.0, config.getBufferGrouperMaxLoadFactor(), 0.0);
+    Assert.assertFalse(config.isApplyLimitPushDownToSegment());
   }
 
   @Test
@@ -78,6 +79,7 @@
     Assert.assertEquals(4, config2.getMaxOnDiskStorage());
     Assert.assertEquals(5, config2.getMaxMergingDictionarySize());
     Assert.assertEquals(6.0, config2.getBufferGrouperMaxLoadFactor(), 0.0);
+    Assert.assertFalse(config2.isApplyLimitPushDownToSegment());
   }
 
   @Test
@@ -95,7 +97,7 @@
                             "maxOnDiskStorage", 0,
                             "maxResults", 2,
                             "maxMergingDictionarySize", 3,
-                            "applyLimitPushDownToSegment", false
+                            "applyLimitPushDownToSegment", true
                         )
                     )
                     .build()
@@ -109,6 +111,6 @@
     Assert.assertEquals(0, config2.getMaxOnDiskStorage());
     Assert.assertEquals(3, config2.getMaxMergingDictionarySize());
     Assert.assertEquals(6.0, config2.getBufferGrouperMaxLoadFactor(), 0.0);
-    Assert.assertFalse(config2.isApplyLimitPushDownToSegment());
+    Assert.assertTrue(config2.isApplyLimitPushDownToSegment());
   }
 }
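
For reference, a minimal sketch of the per-query override path that the updated GroupByQueryConfigTest exercises. This is not part of the patch: the data source name and interval are placeholders, and it assumes the druid-processing module (plus Guava) is on the classpath.

```java
// Minimal sketch of overriding the new default per query.
// Placeholder data source and interval; not taken from the patch.
import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.GroupByQueryConfig;

public class LimitPushDownOverrideSketch
{
  public static void main(String[] args)
  {
    // With this patch, segment-level limit push down is off by default.
    final GroupByQueryConfig config = new GroupByQueryConfig();
    System.out.println(config.isApplyLimitPushDownToSegment()); // false

    // A single query can opt back in through its context; withOverrides()
    // layers the context keys over the cluster-wide defaults, as the
    // updated test asserts.
    final GroupByQuery query = GroupByQuery
        .builder()
        .setDataSource("placeholder_datasource")
        .setInterval(Intervals.of("2000/P1D"))
        .setGranularity(Granularities.ALL)
        .setContext(ImmutableMap.<String, Object>of("applyLimitPushDownToSegment", true))
        .build();

    System.out.println(config.withOverrides(query).isApplyLimitPushDownToSegment()); // true
  }
}
```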