Replace .collect(toList()) with .toList() and misc. code cleanups (#12978)

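Background on the main substitution: Stream.toList() (Java 16+) is a shorter
equivalent of collect(Collectors.toList()) with one semantic difference - it
returns an unmodifiable List, whereas Collectors.toList() guarantees nothing
about the type or mutability of the returned list (in practice it yields a
mutable ArrayList). That difference is also why the ReaderPool change below can
drop a wrapping Collections.unmodifiableList(...). A minimal sketch of the
difference (ToListDemo is an illustrative name, not code from this patch):

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    class ToListDemo {
      public static void main(String[] args) {
        // Both forms preserve encounter order and produce equal lists:
        List<String> a = Stream.of("b", "a").sorted().collect(Collectors.toList());
        List<String> b = Stream.of("b", "a").sorted().toList();
        System.out.println(a.equals(b)); // true

        a.add("c"); // ok: Collectors.toList() currently returns a mutable ArrayList
        // b.add("c"); // would throw UnsupportedOperationException - toList() is unmodifiable
      }
    }

The toArray(new String[0]) changes are in the same spirit: on modern JVMs the
zero-length-array form performs at least as well as presizing the array, and
it reads shorter.
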
diff --git a/dev-tools/scripts/StageArtifacts.java b/dev-tools/scripts/StageArtifacts.java
index 052c251..be6f2fa 100644
--- a/dev-tools/scripts/StageArtifacts.java
+++ b/dev-tools/scripts/StageArtifacts.java
@@ -362,7 +362,7 @@
             // Ignore locally generated maven metadata files.
             .filter(path -> !path.getFileName().toString().startsWith("maven-metadata."))
             .sorted(Comparator.comparing(Path::toString))
-            .collect(Collectors.toList());
+            .toList();
       }
 
       // Figure out nexus profile ID based on POMs. It is assumed that all artifacts
diff --git a/gradle/generation/extract-jdk-apis/ExtractJdkApis.java b/gradle/generation/extract-jdk-apis/ExtractJdkApis.java
index 58c7d2e..82f43c1 100644
--- a/gradle/generation/extract-jdk-apis/ExtractJdkApis.java
+++ b/gradle/generation/extract-jdk-apis/ExtractJdkApis.java
@@ -82,7 +82,7 @@
     // Collect all files to process:
     final List<Path> filesToExtract;
     try (var stream = Files.walk(jrtPath)) {
-      filesToExtract = stream.filter(p -> pattern.matches(jrtPath.relativize(p))).collect(Collectors.toList());
+      filesToExtract = stream.filter(p -> pattern.matches(jrtPath.relativize(p))).toList();
     }
     
     // Process all class files:
diff --git a/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java b/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java
index 988deaf..42b9880 100644
--- a/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java
+++ b/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java
@@ -168,7 +168,7 @@
 
   private static final Map<Class<?>, Function<Random, Object>> argProducers =
       Collections.unmodifiableMap(
-          new IdentityHashMap<Class<?>, Function<Random, Object>>() {
+          new IdentityHashMap<>() {
             {
               put(
                   int.class,
@@ -176,7 +176,7 @@
                     // TODO: could cause huge ram usage to use full int range for some filters
                     // (e.g. allocate enormous arrays)
                     // return Integer.valueOf(random.nextInt());
-                    return Integer.valueOf(TestUtil.nextInt(random, -50, 50));
+                    return TestUtil.nextInt(random, -50, 50);
                   });
               put(
                   char.class,
@@ -187,7 +187,7 @@
                     while (true) {
                       char c = (char) random.nextInt(65536);
                       if (c < '\uD800' || c > '\uDFFF') {
-                        return Character.valueOf(c);
+                        return c;
                       }
                     }
                   });
@@ -382,7 +382,7 @@
                   });
               put(
                   SynonymMap.class,
-                  new Function<Random, Object>() {
+                  new Function<>() {
                     @Override
                     public Object apply(Random random) {
                       SynonymMap.Builder b = new SynonymMap.Builder(random.nextBoolean());
@@ -448,12 +448,11 @@
                   });
               put(
                   Automaton.class,
-                  random -> {
-                    return Operations.determinize(
-                        new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE)
-                            .toAutomaton(),
-                        Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
-                  });
+                  random ->
+                      Operations.determinize(
+                          new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE)
+                              .toAutomaton(),
+                          Operations.DEFAULT_DETERMINIZE_WORK_LIMIT));
               put(
                   PatternTypingFilter.PatternTypingRule[].class,
                   random -> {
@@ -625,9 +624,9 @@
     }
 
     final Comparator<Constructor<?>> ctorComp = Comparator.comparing(Constructor::toGenericString);
-    Collections.sort(tokenizers, ctorComp);
-    Collections.sort(tokenfilters, ctorComp);
-    Collections.sort(charfilters, ctorComp);
+    tokenizers.sort(ctorComp);
+    tokenfilters.sort(ctorComp);
+    charfilters.sort(ctorComp);
     if (VERBOSE) {
       System.out.println("tokenizers = " + tokenizers);
       System.out.println("tokenfilters = " + tokenfilters);
@@ -642,7 +641,7 @@
             .filter(c -> c.getName().endsWith("Stemmer"))
             .map(stemmerCast)
             .sorted(Comparator.comparing(Class::getName))
-            .collect(Collectors.toList());
+            .toList();
     if (VERBOSE) {
       System.out.println("snowballStemmers = " + snowballStemmers);
     }
@@ -786,7 +785,7 @@
         if (cause instanceof IllegalArgumentException
             || (cause instanceof NullPointerException && Stream.of(args).anyMatch(Objects::isNull))
             || cause instanceof UnsupportedOperationException) {
-          // thats ok, ignore
+          // that's ok, ignore
           if (VERBOSE) {
             System.err.println("Ignoring IAE/UOE/NPE from ctor:");
             cause.printStackTrace(System.err);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
index 892abfd..bc07bce 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java
@@ -30,7 +30,6 @@
 import java.util.TreeSet;
 import java.util.function.Consumer;
 import java.util.function.IntPredicate;
-import java.util.stream.Collectors;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.fst.FST;
@@ -108,7 +107,7 @@
           }
         });
 
-    return roots.stream().sorted().collect(Collectors.toList());
+    return roots.stream().sorted().toList();
   }
 
   private static boolean isWorseThan(int score, CharsRef candidate, Weighted<Root<String>> root) {
@@ -141,7 +140,7 @@
         }
       }
     }
-    return expanded.stream().limit(MAX_GUESSES).collect(Collectors.toList());
+    return expanded.stream().limit(MAX_GUESSES).toList();
   }
 
   // find minimum threshold for a passable suggestion
@@ -223,7 +222,7 @@
           }
         });
 
-    return result.stream().limit(MAX_WORDS).collect(Collectors.toList());
+    return result.stream().limit(MAX_WORDS).toList();
   }
 
   private void processAffixes(boolean prefixes, String word, AffixProcessor processor) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
index 3b58e0f..f288ef9 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java
@@ -28,7 +28,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
-import java.util.stream.Collectors;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.IntsRef;
 
@@ -304,10 +303,7 @@
    * Dictionary#lookupEntries}.
    */
   public List<String> getRoots(String word) {
-    return stemmer.stem(word).stream()
-        .map(CharsRef::toString)
-        .distinct()
-        .collect(Collectors.toList());
+    return stemmer.stem(word).stream().map(CharsRef::toString).distinct().toList();
   }
 
   /**
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
index eafd623..28748f4 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java
@@ -23,7 +23,6 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
-import java.util.stream.Collectors;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.AutomatonToTokenStream;
 import org.apache.lucene.analysis.CharArraySet;
@@ -84,16 +83,14 @@
         new CannedTokenStream(
             0,
             12,
-            new Token[] {
-              token("wtf", 1, 1, 0, 3),
-              token("what", 0, 1, 0, 3),
-              token("wow", 0, 1, 0, 3),
-              token("the", 1, 1, 0, 3),
-              token("that's", 0, 1, 0, 3),
-              token("fudge", 1, 1, 0, 3),
-              token("funny", 0, 1, 0, 3),
-              token("happened", 1, 1, 4, 12)
-            });
+            token("wtf", 1, 1, 0, 3),
+            token("what", 0, 1, 0, 3),
+            token("wow", 0, 1, 0, 3),
+            token("the", 1, 1, 0, 3),
+            token("that's", 0, 1, 0, 3),
+            token("fudge", 1, 1, 0, 3),
+            token("funny", 0, 1, 0, 3),
+            token("happened", 1, 1, 4, 12));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -116,16 +113,14 @@
         new CannedTokenStream(
             0,
             12,
-            new Token[] {
-              token("wtf", 1, 5, 0, 3),
-              token("what", 0, 1, 0, 3),
-              token("wow", 0, 3, 0, 3),
-              token("the", 1, 1, 0, 3),
-              token("fudge", 1, 3, 0, 3),
-              token("that's", 1, 1, 0, 3),
-              token("funny", 1, 1, 0, 3),
-              token("happened", 1, 1, 4, 12)
-            });
+            token("wtf", 1, 5, 0, 3),
+            token("what", 0, 1, 0, 3),
+            token("wow", 0, 3, 0, 3),
+            token("the", 1, 1, 0, 3),
+            token("fudge", 1, 3, 0, 3),
+            token("that's", 1, 1, 0, 3),
+            token("funny", 1, 1, 0, 3),
+            token("happened", 1, 1, 4, 12));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -149,16 +144,14 @@
         new CannedTokenStream(
             0,
             12,
-            new Token[] {
-              token("what", 1, 1, 0, 3),
-              token("wow", 0, 3, 0, 3),
-              token("wtf", 0, 5, 0, 3),
-              token("the", 1, 1, 0, 3),
-              token("fudge", 1, 3, 0, 3),
-              token("that's", 1, 1, 0, 3),
-              token("funny", 1, 1, 0, 3),
-              token("happened", 1, 1, 4, 12)
-            });
+            token("what", 1, 1, 0, 3),
+            token("wow", 0, 3, 0, 3),
+            token("wtf", 0, 5, 0, 3),
+            token("the", 1, 1, 0, 3),
+            token("fudge", 1, 3, 0, 3),
+            token("that's", 1, 1, 0, 3),
+            token("funny", 1, 1, 0, 3),
+            token("happened", 1, 1, 4, 12));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -182,14 +175,12 @@
         new CannedTokenStream(
             0,
             20,
-            new Token[] {
-              token("wizard", 1, 1, 0, 6),
-              token("wizard_of_oz", 0, 3, 0, 12),
-              token("of", 1, 1, 7, 9),
-              token("oz", 1, 1, 10, 12),
-              token("oz_screams", 0, 2, 10, 20),
-              token("screams", 1, 1, 13, 20),
-            });
+            token("wizard", 1, 1, 0, 6),
+            token("wizard_of_oz", 0, 3, 0, 12),
+            token("of", 1, 1, 7, 9),
+            token("oz", 1, 1, 10, 12),
+            token("oz_screams", 0, 2, 10, 20),
+            token("screams", 1, 1, 13, 20));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -209,12 +200,10 @@
         new CannedTokenStream(
             0,
             22,
-            new Token[] {
-              token("hello", 1, 1, 0, 5),
-              token("pseudo", 1, 1, 6, 12),
-              token("world", 1, 1, 13, 18),
-              token("fun", 1, 1, 19, 22),
-            });
+            token("hello", 1, 1, 0, 5),
+            token("pseudo", 1, 1, 6, 12),
+            token("world", 1, 1, 13, 18),
+            token("fun", 1, 1, 19, 22));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -234,9 +223,9 @@
         new CannedTokenStream(
             0,
             13,
-            new Token[] {
-              token("hello", 1, 1, 0, 5), token("hole", 2, 1, 6, 10), token("fun", 1, 1, 11, 13),
-            });
+            token("hello", 1, 1, 0, 5),
+            token("hole", 2, 1, 6, 10),
+            token("fun", 1, 1, 11, 13));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -259,9 +248,9 @@
         new CannedTokenStream(
             0,
             12,
-            new Token[] {
-              token("wizard", 1, 1, 0, 6), token("woz", 0, 3, 0, 12), token("oz", 2, 1, 10, 12),
-            });
+            token("wizard", 1, 1, 0, 6),
+            token("woz", 0, 3, 0, 12),
+            token("oz", 2, 1, 10, 12));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -282,9 +271,9 @@
         new CannedTokenStream(
             0,
             27,
-            new Token[] {
-              token("dog", 1, 3, 0, 5), token("puppy", 0, 3, 0, 5), token("flies", 3, 1, 6, 11),
-            });
+            token("dog", 1, 3, 0, 5),
+            token("puppy", 0, 3, 0, 5),
+            token("flies", 3, 1, 6, 11));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -305,20 +294,18 @@
         new CannedTokenStream(
             0,
             11,
-            new Token[] {
-              token("a", 1, 1, 0, 1),
-              token("b", 0, 2, 0, 1),
-              token("a", 1, 2, 2, 3),
-              token("b", 1, 2, 2, 3),
-              token("a", 1, 2, 4, 5),
-              token("b", 1, 2, 4, 5),
-              token("a", 1, 2, 6, 7),
-              token("b", 1, 2, 6, 7),
-              token("a", 1, 2, 8, 9),
-              token("b", 1, 2, 8, 9),
-              token("a", 1, 2, 10, 11),
-              token("b", 1, 2, 10, 11),
-            });
+            token("a", 1, 1, 0, 1),
+            token("b", 0, 2, 0, 1),
+            token("a", 1, 2, 2, 3),
+            token("b", 1, 2, 2, 3),
+            token("a", 1, 2, 4, 5),
+            token("b", 1, 2, 4, 5),
+            token("a", 1, 2, 6, 7),
+            token("b", 1, 2, 6, 7),
+            token("a", 1, 2, 8, 9),
+            token("b", 1, 2, 8, 9),
+            token("a", 1, 2, 10, 11),
+            token("b", 1, 2, 10, 11));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -340,9 +327,7 @@
   public void testAltPathFirstStepHole() throws Exception {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            3,
-            new Token[] {token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("c", 1, 1, 2, 3)});
+            0, 3, token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("c", 1, 1, 2, 3));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -356,7 +341,7 @@
         3);
   }
 
-  // Last node in an alt path fixes outputnode of long path. In this graph the follow up node fixes
+  // Last node in an alt path fixes output node of long path. In this graph the follow-up node fixes
   // that.
   // incorrect pos length of abc = 1
   public void testAltPathLastStepHole() throws Exception {
@@ -364,12 +349,10 @@
         new CannedTokenStream(
             0,
             4,
-            new Token[] {
-              token("abc", 1, 3, 0, 3),
-              token("a", 0, 1, 0, 1),
-              token("b", 1, 1, 1, 2),
-              token("d", 2, 1, 3, 4)
-            });
+            token("abc", 1, 3, 0, 3),
+            token("a", 0, 1, 0, 1),
+            token("b", 1, 1, 1, 2),
+            token("d", 2, 1, 3, 4));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -389,9 +372,9 @@
         new CannedTokenStream(
             0,
             28,
-            new Token[] {
-              token("hello", 1, 1, 0, 5), token("hole", 5, 1, 20, 24), token("fun", 1, 1, 25, 28),
-            });
+            token("hello", 1, 1, 0, 5),
+            token("hole", 5, 1, 20, 24),
+            token("fun", 1, 1, 25, 28));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -411,9 +394,7 @@
   public void testAltPathLastStepLongHole() throws Exception {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            4,
-            new Token[] {token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("d", 3, 1, 3, 4)});
+            0, 4, token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("d", 3, 1, 3, 4));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -434,9 +415,7 @@
   public void testAltPathLastStepHoleWithoutEndToken() throws Exception {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            2,
-            new Token[] {token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("b", 1, 1, 1, 2)});
+            0, 2, token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("b", 1, 1, 1, 2));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -457,9 +436,7 @@
   public void testAltPathLastStepHoleFollowedByHole() throws Exception {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            5,
-            new Token[] {token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("e", 3, 1, 4, 5)});
+            0, 5, token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("e", 3, 1, 4, 5));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -480,14 +457,12 @@
         new CannedTokenStream(
             0,
             5,
-            new Token[] {
-              token("abc", 1, 3, 0, 3),
-              token("a", 0, 1, 0, 1),
-              token("b", 1, 1, 1, 2),
-              token("cde", 1, 3, 2, 5),
-              token("d", 1, 1, 3, 4),
-              token("e", 1, 1, 4, 5)
-            });
+            token("abc", 1, 3, 0, 3),
+            token("a", 0, 1, 0, 1),
+            token("b", 1, 1, 1, 2),
+            token("cde", 1, 3, 2, 5),
+            token("d", 1, 1, 3, 4),
+            token("e", 1, 1, 4, 5));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -509,13 +484,11 @@
         new CannedTokenStream(
             0,
             5,
-            new Token[] {
-              token("abc", 1, 3, 0, 3),
-              token("b", 1, 1, 1, 2),
-              token("cde", 1, 3, 2, 5),
-              token("d", 1, 1, 3, 4),
-              token("e", 1, 1, 4, 5)
-            });
+            token("abc", 1, 3, 0, 3),
+            token("b", 1, 1, 1, 2),
+            token("cde", 1, 3, 2, 5),
+            token("d", 1, 1, 3, 4),
+            token("e", 1, 1, 4, 5));
 
     TokenStream out = new FlattenGraphFilter(in);
 
@@ -531,7 +504,7 @@
 
   // When the first token is a hole there is no original token to offset from.
   public void testFirstTokenHole() throws Exception {
-    TokenStream in = new CannedTokenStream(0, 9, new Token[] {token("start", 2, 1, 0, 5)});
+    TokenStream in = new CannedTokenStream(0, 9, token("start", 2, 1, 0, 5));
     TokenStream out = new FlattenGraphFilter(in);
 
     assertTokenStreamContents(
@@ -547,13 +520,11 @@
         new CannedTokenStream(
             0,
             9,
-            new Token[] {
-              token("a", 1, 1, 4, 8),
-              token("abc", 0, 3, 4, 7),
-              token("cd", 2, 2, 6, 8),
-              token("d", 1, 1, 7, 8),
-              token("e", 1, 1, 8, 9)
-            });
+            token("a", 1, 1, 4, 8),
+            token("abc", 0, 3, 4, 7),
+            token("cd", 2, 2, 6, 8),
+            token("d", 1, 1, 7, 8),
+            token("e", 1, 1, 8, 9));
     TokenStream out = new FlattenGraphFilter(in);
     assertTokenStreamContents(
         out,
@@ -568,11 +539,7 @@
   public void testShingledGapAltPath() throws Exception {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            4,
-            new Token[] {
-              token("abc", 1, 3, 0, 3), token("abcd", 0, 4, 0, 4), token("cd", 2, 2, 2, 4),
-            });
+            0, 4, token("abc", 1, 3, 0, 3), token("abcd", 0, 4, 0, 4), token("cd", 2, 2, 2, 4));
     TokenStream out = new FlattenGraphFilter(in);
     assertTokenStreamContents(
         out,
@@ -591,16 +558,14 @@
         new CannedTokenStream(
             0,
             7,
-            new Token[] {
-              token("a", 1, 1, 0, 1),
-              token("ab", 0, 2, 0, 2),
-              token("abcdef", 0, 6, 0, 6),
-              token("abcd", 0, 4, 0, 4),
-              token("bcdef", 1, 5, 1, 7),
-              token("def", 2, 3, 4, 7),
-              token("e", 1, 1, 5, 6),
-              token("f", 1, 1, 6, 7)
-            });
+            token("a", 1, 1, 0, 1),
+            token("ab", 0, 2, 0, 2),
+            token("abcdef", 0, 6, 0, 6),
+            token("abcd", 0, 4, 0, 4),
+            token("bcdef", 1, 5, 1, 7),
+            token("def", 2, 3, 4, 7),
+            token("e", 1, 1, 5, 6),
+            token("f", 1, 1, 6, 7));
     TokenStream out = new FlattenGraphFilter(in);
     assertTokenStreamContents(
         out,
@@ -618,11 +583,7 @@
   public void testShingleWithLargeLeadingGap() throws IOException {
     TokenStream in =
         new CannedTokenStream(
-            0,
-            6,
-            new Token[] {
-              token("abcde", 1, 5, 0, 5), token("ef", 4, 2, 4, 6), token("f", 1, 1, 5, 6),
-            });
+            0, 6, token("abcde", 1, 5, 0, 5), token("ef", 4, 2, 4, 6), token("f", 1, 1, 5, 6));
     TokenStream out = new FlattenGraphFilter(in);
     assertTokenStreamContents(
         out,
@@ -780,7 +741,7 @@
     }
     acceptStrings.sort(Comparator.naturalOrder());
 
-    acceptStrings = acceptStrings.stream().limit(wordCount).collect(Collectors.toList());
+    acceptStrings = acceptStrings.stream().limit(wordCount).toList();
     Automaton nonFlattenedAutomaton = Automata.makeStringUnion(acceptStrings);
 
     TokenStream ts = AutomatonToTokenStream.toTokenStream(nonFlattenedAutomaton);
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java
index 7d709e6..024a493 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java
@@ -27,7 +27,6 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenFilter;
@@ -527,7 +526,7 @@
           urlList.add(line);
         }
       }
-      urls = urlList.toArray(new String[urlList.size()]);
+      urls = urlList.toArray(new String[0]);
     } finally {
       if (null != bufferedReader) {
         bufferedReader.close();
@@ -576,7 +575,7 @@
           emailList.add(line);
         }
       }
-      emails = emailList.toArray(new String[emailList.size()]);
+      emails = emailList.toArray(new String[0]);
     } finally {
       if (null != bufferedReader) {
         bufferedReader.close();
@@ -667,7 +666,7 @@
           urlList.add(line);
         }
       }
-      urls = urlList.toArray(new String[urlList.size()]);
+      urls = urlList.toArray(new String[0]);
     } finally {
       if (null != bufferedReader) {
         bufferedReader.close();
@@ -881,7 +880,7 @@
           }
         };
 
-    for (String tld : TLDs.collect(Collectors.toList())) {
+    for (String tld : TLDs.toList()) {
       String URL = "example." + tld;
       BaseTokenStreamTestCase.assertAnalyzesTo(
           analyzer, URL, new String[] {URL}, new String[] {"<URL>"});
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
index 68f6922..9b23a81 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java
@@ -97,7 +97,7 @@
     AtomicBoolean failTest = new AtomicBoolean();
 
     Map<String, List<Long>> global = new LinkedHashMap<>();
-    for (Path aff : findAllAffixFiles().collect(Collectors.toList())) {
+    for (Path aff : findAllAffixFiles().toList()) {
       Map<String, List<Long>> local = new LinkedHashMap<>();
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
       try (ExposePosition is = new ExposePosition(Files.readAllBytes(aff))) {
@@ -184,9 +184,7 @@
         };
 
     List<Callable<Void>> tasks =
-        findAllAffixFiles()
-            .map(aff -> (Callable<Void>) () -> process.apply(aff))
-            .collect(Collectors.toList());
+        findAllAffixFiles().map(aff -> (Callable<Void>) () -> process.apply(aff)).toList();
     try {
       for (Future<?> future : executor.invokeAll(tasks)) {
         future.get();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
index 5c2ec42..2ce977b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java
@@ -23,7 +23,6 @@
 import java.util.Collection;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.stream.Collectors;
 import org.junit.Assert;
 import org.junit.AssumptionViolatedException;
 import org.junit.Test;
@@ -72,7 +71,7 @@
       }
     }
 
-    return names.stream().map(s -> new Object[] {s, tests.resolve(s)}).collect(Collectors.toList());
+    return names.stream().map(s -> new Object[] {s, tests.resolve(s)}).toList();
   }
 
   @Test
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
index 0498995..9bbb091 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
@@ -34,7 +34,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.apache.lucene.util.NamedThreadFactory;
 import org.junit.Assume;
@@ -184,7 +183,7 @@
         loadWords(code, wordCount, dictionary).stream()
             .distinct()
             .filter(w -> hasQuickSuggestions(speller, base, optimized, w))
-            .collect(Collectors.toList());
+            .toList();
     System.out.println("Checking " + words.size() + " misspelled words");
 
     measure(
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
index 6de1d60..bac5fdf 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java
@@ -40,7 +40,6 @@
 import java.util.function.Predicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
 
 /**
  * Downloads/generates lucene/analysis/icu/src/data/utr30/*.txt for the specified icu release tag.
@@ -97,7 +96,7 @@
               && !name.equals(NFKC_CF_TXT);
         };
     try (var stream = Files.list(Paths.get(".")).filter(predicate)) {
-      for (Path file : stream.collect(Collectors.toList())) {
+      for (Path file : stream.toList()) {
         expandDataFileRules(file);
       }
     }
diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
index a1ce01a..b7f322e 100644
--- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
+++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java
@@ -16,18 +16,18 @@
  */
 package org.apache.lucene.analysis.icu;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import com.ibm.icu.text.RuleBasedBreakIterator;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
-import java.util.stream.Collectors;
 
 /**
  * Command-line utility to converts RuleBasedBreakIterator (.rbbi) files into binary compiled form
@@ -38,8 +38,8 @@
   static String getRules(Path ruleFile) throws IOException {
     StringBuilder rules = new StringBuilder();
     InputStream in = Files.newInputStream(ruleFile);
-    BufferedReader cin = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
-    String line = null;
+    BufferedReader cin = new BufferedReader(new InputStreamReader(in, UTF_8));
+    String line;
     while ((line = cin.readLine()) != null) {
       if (!line.startsWith("#")) {
         rules.append(line);
@@ -54,10 +54,7 @@
   static void compile(Path srcDir, Path destDir) throws Exception {
     List<Path> files;
     try (var stream = Files.list(srcDir)) {
-      files =
-          stream
-              .filter(name -> name.getFileName().toString().endsWith("rbbi"))
-              .collect(Collectors.toList());
+      files = stream.filter(name -> name.getFileName().toString().endsWith("rbbi")).toList();
     }
 
     if (files.isEmpty()) throw new IOException("No input files matching *.rbbi at: " + srcDir);
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java
index 9f68724..8e137a8 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java
@@ -16,16 +16,16 @@
  */
 package org.apache.lucene.analysis.ja.completion;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
-import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 
@@ -39,14 +39,13 @@
 public class KatakanaRomanizer {
   private static final String ROMAJI_MAP_FILE = "romaji_map.txt";
 
-  private static KatakanaRomanizer INSTANCE;
+  private static final KatakanaRomanizer INSTANCE;
 
   static {
     // Build romaji-map and keystroke arrays from the pre-defined Katakana-Romaji mapping file.
     try (InputStreamReader is =
             new InputStreamReader(
-                KatakanaRomanizer.class.getResourceAsStream(ROMAJI_MAP_FILE),
-                Charset.forName("UTF-8"));
+                KatakanaRomanizer.class.getResourceAsStream(ROMAJI_MAP_FILE), UTF_8);
         BufferedReader ir = new BufferedReader(is)) {
       Map<CharsRef, List<CharsRef>> romajiMap = new HashMap<>();
       String line;
@@ -118,7 +117,7 @@
       List<CharsRef> candidates =
           romajiMap.get(keystrokes[matched.keystrokeLen - 1][matched.keystrokeIndex]);
 
-      if (pendingOutputs.size() == 0) {
+      if (pendingOutputs.isEmpty()) {
         // There is no pending output.
         // Add the matched keystrokes to pending outputs list.
         for (CharsRef cref : candidates) {
@@ -166,7 +165,7 @@
         output.append(input.chars, pos, input.length - pos);
       }
     }
-    return pendingOutputs.stream().map(CharsRefBuilder::get).collect(Collectors.toList());
+    return pendingOutputs.stream().map(CharsRefBuilder::get).toList();
   }
 
   private MatchedKeystroke longestKeystrokeMatch(CharsRef input, int inputOffset) {
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java
index 8afddb9..fe8d7fd 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java
@@ -26,7 +26,6 @@
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.util.CSVUtil;
 import org.apache.lucene.util.IntsRefBuilder;
@@ -57,10 +56,7 @@
   public TokenInfoDictionaryWriter build(Path dir) throws IOException {
     try (Stream<Path> files = Files.list(dir)) {
       List<Path> csvFiles =
-          files
-              .filter(path -> path.getFileName().toString().endsWith(".csv"))
-              .sorted()
-              .collect(Collectors.toList());
+          files.filter(path -> path.getFileName().toString().endsWith(".csv")).sorted().toList();
       return buildDictionary(csvFiles);
     }
   }
@@ -123,7 +119,7 @@
         scratch.grow(token.length());
         scratch.setLength(token.length());
         for (int i = 0; i < token.length(); i++) {
-          scratch.setIntAt(i, (int) token.charAt(i));
+          scratch.setIntAt(i, token.charAt(i));
         }
         fstCompiler.add(scratch.get(), ord);
       }
@@ -144,7 +140,7 @@
    * 4-9 - pos
    * 10  - base form
    * 11  - reading
-   * 12  - pronounciation
+   * 12  - pronunciation
    *
    * UniDic features
    *
@@ -178,7 +174,7 @@
 
       // If the surface reading is non-existent, use surface form for reading and pronunciation.
       // This happens with punctuation in UniDic and there are possibly other cases as well
-      if (features[13].length() == 0) {
+      if (features[13].isEmpty()) {
         features2[11] = features[0];
         features2[12] = features[0];
       } else {
diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java
index f66abba..8639f66 100644
--- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java
+++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java
@@ -26,7 +26,6 @@
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.util.CSVUtil;
 import org.apache.lucene.util.IntsRefBuilder;
@@ -42,8 +41,8 @@
    */
   private int offset = 0;
 
-  private String encoding;
-  private Normalizer.Form normalForm;
+  private final String encoding;
+  private final Normalizer.Form normalForm;
 
   TokenInfoDictionaryBuilder(String encoding, boolean normalizeEntries) {
     this.encoding = encoding;
@@ -53,10 +52,7 @@
   public TokenInfoDictionaryWriter build(Path dir) throws IOException {
     try (Stream<Path> files = Files.list(dir)) {
       List<Path> csvFiles =
-          files
-              .filter(path -> path.getFileName().toString().endsWith(".csv"))
-              .sorted()
-              .collect(Collectors.toList());
+          files.filter(path -> path.getFileName().toString().endsWith(".csv")).sorted().toList();
       return buildDictionary(csvFiles);
     }
   }
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java
index d9ae90a..bbe97ec 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java
@@ -22,7 +22,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.store.ByteBuffersDataOutput;
 import org.apache.lucene.tests.codecs.uniformsplit.Rot13CypherTestUtil;
 import org.apache.lucene.tests.util.LuceneTestCase;
@@ -107,7 +106,7 @@
     for (int i = 0; i < blockFPs.length; i++) {
       blockFPs[i] = i;
     }
-    List<BytesRef> blockKeys = vocab.stream().map(BytesRef::new).collect(Collectors.toList());
+    List<BytesRef> blockKeys = vocab.stream().map(BytesRef::new).toList();
     FSTDictionary indexDictionary = createFSTDictionary(blockKeys, blockFPs);
     IndexDictionary.Browser browser = indexDictionary.browser();
     for (int i = 0; i < vocab.size(); i++) {
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java
index a5f9097..ea178bc 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java
@@ -144,15 +144,14 @@
   }
 
   private void validateExpectedSuffix(Map<String, String> vocab) {
-    List<BytesRef> src =
-        vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList());
+    List<BytesRef> src = vocab.keySet().stream().sorted().map(BytesRef::new).toList();
     List<TermBytes> output = compressPrefixes(src);
     validateMapList(
         vocab,
-        src.stream().map(BytesRef::utf8ToString).collect(Collectors.toList()),
+        src.stream().map(BytesRef::utf8ToString).toList(),
         output.stream()
             .map(e -> e.getSuffixOffset() + createSuffixBytes(e).utf8ToString())
-            .collect(Collectors.toList()));
+            .toList());
   }
 
   private BytesRef createSuffixBytes(TermBytes termBytes) {
@@ -167,21 +166,19 @@
   }
 
   private void validateExpectedMDP(Map<String, String> vocab) {
-    List<BytesRef> src =
-        vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList());
+    List<BytesRef> src = vocab.keySet().stream().sorted().map(BytesRef::new).toList();
     List<TermBytes> output = compressPrefixes(src);
     validateMapList(
         vocab,
-        src.stream().map(BytesRef::utf8ToString).collect(Collectors.toList()),
+        src.stream().map(BytesRef::utf8ToString).toList(),
         output.stream()
             .map(e -> new BytesRef(e.getTerm().bytes, 0, e.getMdpLength()).utf8ToString())
-            .collect(Collectors.toList()));
+            .toList());
   }
 
   private void validateIncrementalDecoding(Map<String, String> vocab) {
     BytesRef previous = new BytesRef(80);
-    List<BytesRef> src =
-        vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList());
+    List<BytesRef> src = vocab.keySet().stream().sorted().map(BytesRef::new).toList();
     List<TermBytes> output = compressPrefixes(src);
 
     for (int i = 0; i < src.size(); i++) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 68a2ce1..417fa44 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -383,8 +383,8 @@
   private final Deque<MergePolicy.OneMerge> pendingMerges = new ArrayDeque<>();
   private final Set<MergePolicy.OneMerge> runningMerges = new HashSet<>();
   private final List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<>();
+  private final Merges merges = new Merges();
   private long mergeGen;
-  private Merges merges = new Merges();
   private boolean didMessageState;
   private final AtomicInteger flushCount = new AtomicInteger();
   private final AtomicInteger flushDeletesCount = new AtomicInteger();
@@ -657,7 +657,7 @@
                                               sr.close();
                                             }
                                           })
-                              .collect(Collectors.toList()));
+                              .toList());
                     }
                   };
             }
@@ -6616,7 +6616,7 @@
           @Override
           public FieldInfosBuilder newFieldInfosBuilder(String softDeletesFieldName) {
             return new FieldInfosBuilder() {
-              private FieldInfos.Builder builder =
+              private final FieldInfos.Builder builder =
                   new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesFieldName));
 
               @Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
index 0b7a2cb..f10e0fc 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
@@ -83,7 +83,7 @@
       PAUSED,
       /** Other reason. */
       OTHER
-    };
+    }
 
     private final ReentrantLock pauseLock = new ReentrantLock();
     private final Condition pausing = pauseLock.newCondition();
@@ -103,7 +103,7 @@
     /** Creates a new merge progress info. */
     public OneMergeProgress() {
       // Place all the pause reasons in there immediately so that we can simply update values.
-      pauseTimesNS = new EnumMap<PauseReason, AtomicLong>(PauseReason.class);
+      pauseTimesNS = new EnumMap<>(PauseReason.class);
       for (PauseReason p : PauseReason.values()) {
         pauseTimesNS.put(p, new AtomicLong());
       }
@@ -170,8 +170,7 @@
     /** Returns pause reasons and associated times in nanoseconds. */
     public Map<PauseReason, Long> getPauseTimes() {
       Set<Entry<PauseReason, AtomicLong>> entries = pauseTimesNS.entrySet();
-      return entries.stream()
-          .collect(Collectors.toMap((e) -> e.getKey(), (e) -> e.getValue().get()));
+      return entries.stream().collect(Collectors.toMap(Entry::getKey, (e) -> e.getValue().get()));
     }
 
     final void setMergeThread(Thread owner) {
@@ -223,7 +222,7 @@
      * @param segments List of {@link SegmentCommitInfo}s to be merged.
      */
     public OneMerge(List<SegmentCommitInfo> segments) {
-      if (0 == segments.size()) {
+      if (segments.isEmpty()) {
         throw new RuntimeException("segments must include at least one segment");
       }
       // clone the list, as the in list may be based off original SegmentInfos and may be modified
@@ -275,7 +274,7 @@
     /**
      * Called by {@link IndexWriter} after the merge is done and all readers have been closed.
      *
-     * @param success true iff the merge finished successfully ie. was committed
+     * @param success true iff the merge finished successfully i.e. was committed
      * @param segmentDropped true iff the merged segment was dropped since it was fully deleted
      */
     public void mergeFinished(boolean success, boolean segmentDropped) throws IOException {}
@@ -284,7 +283,7 @@
     final void close(
         boolean success, boolean segmentDropped, IOConsumer<MergeReader> readerConsumer)
         throws IOException {
-      // this method is final to ensure we never miss a super call to cleanup and finish the merge
+      // this method is final to ensure we never miss a super call to clean up and finish the merge
       if (mergeCompleted.complete(success) == false) {
         throw new IllegalStateException("merge has already finished");
       }
@@ -524,10 +523,7 @@
 
     CompletableFuture<Void> getMergeCompletedFutures() {
       return CompletableFuture.allOf(
-          merges.stream()
-              .map(m -> m.mergeCompleted)
-              .collect(Collectors.toList())
-              .toArray(CompletableFuture<?>[]::new));
+          merges.stream().map(m -> m.mergeCompleted).toArray(CompletableFuture<?>[]::new));
     }
 
     /** Waits, until interrupted, for all merges to complete. */
@@ -763,7 +759,7 @@
   }
 
   /**
-   * Return the byte size of the provided {@link SegmentCommitInfo}, pro-rated by percentage of
+   * Return the byte size of the provided {@link SegmentCommitInfo}, prorated by percentage of
    * non-deleted documents is set.
    */
   protected long size(SegmentCommitInfo info, MergeContext mergeContext) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
index 72e077f..1bac886 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
@@ -30,7 +30,6 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.LongSupplier;
-import java.util.stream.Collectors;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.CollectionUtil;
@@ -177,7 +176,7 @@
 
   /**
    * Releases the {@link ReadersAndUpdates}. This should only be called if the {@link
-   * #get(SegmentCommitInfo, boolean)} is called with the create paramter set to true.
+   * #get(SegmentCommitInfo, boolean)} is called with the 'create' parameter set to true.
    *
    * @return <code>true</code> if any files were written by this release call.
    */
@@ -276,7 +275,7 @@
   /**
    * Returns a list of all currently maintained ReadersAndUpdates sorted by it's ram consumption
    * largest to smallest. This list can also contain readers that don't consume any ram at this
-   * point ie. don't have any updates buffered.
+   * point i.e. don't have any updates buffered.
    */
   synchronized List<ReadersAndUpdates> getReadersByRam() {
     class RamRecordingHolder {
@@ -295,19 +294,17 @@
       }
       readersByRam = new ArrayList<>(readerMap.size());
       for (ReadersAndUpdates rld : readerMap.values()) {
-        // we have to record the ram usage once and then sort
-        // since the ram usage can change concurrently and that will confuse the sort or hit an
+        // we have to record the RAM usage once and then sort
+        // since the RAM usage can change concurrently and that will confuse the sort or hit an
         // assertion
         // the we can acquire here is not enough we would need to lock all ReadersAndUpdates to make
-        // sure it doesn't
-        // change
+        // sure it doesn't change
         readersByRam.add(new RamRecordingHolder(rld));
       }
     }
     // Sort this outside of the lock by largest ramBytesUsed:
     CollectionUtil.introSort(readersByRam, (a, b) -> Long.compare(b.ramBytesUsed, a.ramBytesUsed));
-    return Collections.unmodifiableList(
-        readersByRam.stream().map(h -> h.updates).collect(Collectors.toList()));
+    return readersByRam.stream().map(h -> h.updates).toList();
   }
 
   /** Remove all our references to readers, and commits any pending changes. */
@@ -334,7 +331,7 @@
         priorE = IOUtils.useOrSuppress(priorE, t);
       }
     }
-    assert readerMap.size() == 0;
+    assert readerMap.isEmpty();
     if (priorE != null) {
       throw IOUtils.rethrowAlways(priorE);
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
index f6bcadb..79c7d2f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
@@ -22,7 +22,6 @@
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.Impact;
 import org.apache.lucene.index.Impacts;
 import org.apache.lucene.index.ImpactsEnum;
@@ -62,8 +61,7 @@
     super(matchCost);
 
     final DocIdSetIterator approximation =
-        ConjunctionUtils.intersectIterators(
-            Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList()));
+        ConjunctionUtils.intersectIterators(Arrays.stream(postings).map(p -> p.postings).toList());
     final ImpactsSource impactsSource =
         mergeImpacts(Arrays.stream(postings).map(p -> p.impacts).toArray(ImpactsEnum[]::new));
 
@@ -82,8 +80,7 @@
     for (PhraseQuery.PostingsAndFreq posting : postings) {
       postingsAndPositions.add(new PostingsAndPosition(posting.postings, posting.position));
     }
-    this.postings =
-        postingsAndPositions.toArray(new PostingsAndPosition[postingsAndPositions.size()]);
+    this.postings = postingsAndPositions.toArray(new PostingsAndPosition[0]);
   }
 
   @Override
@@ -204,7 +201,7 @@
 
     return new ImpactsSource() {
 
-      class SubIterator {
+      static class SubIterator {
         final Iterator<Impact> iterator;
         Impact current;
 
@@ -263,7 +260,7 @@
             final int docIdUpTo = getDocIdUpTo(level);
 
             PriorityQueue<SubIterator> pq =
-                new PriorityQueue<SubIterator>(impacts.length) {
+                new PriorityQueue<>(impacts.length) {
                   @Override
                   protected boolean lessThan(SubIterator a, SubIterator b) {
                     return a.current.freq < b.current.freq;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java b/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java
index ceebc54..99f36ea 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java
@@ -24,7 +24,6 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.BytesRefIterator;
@@ -62,12 +61,11 @@
 
   /** Amalgamate a collection of {@link Matches} into a single object */
   public static Matches fromSubMatches(List<Matches> subMatches) {
-    if (subMatches == null || subMatches.size() == 0) {
+    if (subMatches == null || subMatches.isEmpty()) {
       return null;
     }
-    List<Matches> sm =
-        subMatches.stream().filter(m -> m != MATCH_WITH_NO_TERMS).collect(Collectors.toList());
-    if (sm.size() == 0) {
+    List<Matches> sm = subMatches.stream().filter(m -> m != MATCH_WITH_NO_TERMS).toList();
+    if (sm.isEmpty()) {
       return MATCH_WITH_NO_TERMS;
     }
     if (sm.size() == 1) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java
index ca70a52..68e4364 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java
@@ -25,7 +25,6 @@
 import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.Impact;
 import org.apache.lucene.index.Impacts;
 import org.apache.lucene.index.ImpactsSource;
@@ -100,8 +99,7 @@
     }
 
     approximation =
-        ConjunctionUtils.intersectIterators(
-            Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList()));
+        ConjunctionUtils.intersectIterators(Arrays.stream(postings).map(p -> p.postings).toList());
     // What would be a good upper bound of the sloppy frequency? A sum of the
     // sub frequencies would be correct, but it is usually so much higher than
     // the actual sloppy frequency that it doesn't help skip irrelevant
@@ -334,8 +332,7 @@
   private int collide(PhrasePositions pp) {
     int tpPos = tpPos(pp);
     PhrasePositions[] rg = rptGroups[pp.rptGroup];
-    for (int i = 0; i < rg.length; i++) {
-      PhrasePositions pp2 = rg[i];
+    for (PhrasePositions pp2 : rg) {
       if (pp2 != pp && tpPos(pp2) == tpPos) {
         return pp2.rptInd;
       }
@@ -511,13 +508,7 @@
    */
   private void sortRptGroups(ArrayList<ArrayList<PhrasePositions>> rgs) {
     rptGroups = new PhrasePositions[rgs.size()][];
-    Comparator<PhrasePositions> cmprtr =
-        new Comparator<PhrasePositions>() {
-          @Override
-          public int compare(PhrasePositions pp1, PhrasePositions pp2) {
-            return pp1.offset - pp2.offset;
-          }
-        };
+    Comparator<PhrasePositions> cmprtr = Comparator.comparingInt(pp -> pp.offset);
     for (int i = 0; i < rptGroups.length; i++) {
       PhrasePositions[] rg = rgs.get(i).toArray(new PhrasePositions[0]);
       Arrays.sort(rg, cmprtr);
@@ -567,7 +558,7 @@
       HashMap<Term, Integer> tg = termGroups(rptTerms, bb);
       HashSet<Integer> distinctGroupIDs = new HashSet<>(tg.values());
       for (int i = 0; i < distinctGroupIDs.size(); i++) {
-        tmp.add(new HashSet<PhrasePositions>());
+        tmp.add(new HashSet<>());
       }
       for (PhrasePositions pp : rpp) {
         for (Term t : pp.terms) {
@@ -586,8 +577,8 @@
     return res;
   }
 
-  /** Actual position in doc of a PhrasePosition, relies on that position = tpPos - offset) */
-  private final int tpPos(PhrasePositions pp) {
+  /** Actual position in doc of a PhrasePosition; relies on position = tpPos - offset */
+  private int tpPos(PhrasePositions pp) {
     return pp.position + pp.offset;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
index f44ceaf..807f413 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
@@ -24,7 +24,6 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.Impact;
 import org.apache.lucene.index.Impacts;
 import org.apache.lucene.index.ImpactsEnum;
@@ -101,7 +100,7 @@
 
     /** Builds the {@link SynonymQuery}. */
     public SynonymQuery build() {
-      Collections.sort(terms, Comparator.comparing(a -> a.term));
+      terms.sort(Comparator.comparing(a -> a.term));
       return new SynonymQuery(terms.toArray(new TermAndBoost[0]), field);
     }
   }
@@ -117,8 +116,7 @@
   }
 
   public List<Term> getTerms() {
-    return Collections.unmodifiableList(
-        Arrays.stream(terms).map(t -> new Term(field, t.term)).collect(Collectors.toList()));
+    return Arrays.stream(terms).map(t -> new Term(field, t.term)).toList();
   }
 
   @Override
@@ -232,8 +230,7 @@
       if (indexTerms == null) {
         return super.matches(context, doc);
       }
-      List<Term> termList =
-          Arrays.stream(terms).map(t -> new Term(field, t.term)).collect(Collectors.toList());
+      List<Term> termList = Arrays.stream(terms).map(t -> new Term(field, t.term)).toList();
       return MatchesUtils.forField(
           field,
           () -> DisjunctionMatchesIterator.fromTerms(context, doc, getQuery(), field, termList));
@@ -357,7 +354,7 @@
     assert impactsEnums.length == boosts.length;
     return new ImpactsSource() {
 
-      class SubIterator {
+      static class SubIterator {
         final Iterator<Impact> iterator;
         int previousFreq;
         Impact current;
@@ -439,7 +436,7 @@
                           .map(
                               impact ->
                                   new Impact((int) Math.ceil(impact.freq * boost), impact.norm))
-                          .collect(Collectors.toList());
+                          .toList();
                 } else {
                   impactList = impacts[i].getImpacts(impactsLevel);
                 }
diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
index f36a651..22da713 100644
--- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java
@@ -98,8 +98,7 @@
    * Maps files that we are trying to delete (or we tried already but failed) before attempting to
    * delete that key.
    */
-  private final Set<String> pendingDeletes =
-      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
+  private final Set<String> pendingDeletes = ConcurrentHashMap.newKeySet();
 
   private final AtomicInteger opsSinceLastDelete = new AtomicInteger();
 
@@ -186,7 +185,7 @@
       }
     }
 
-    String[] array = entries.toArray(new String[entries.size()]);
+    String[] array = entries.toArray(new String[0]);
     // Directory.listAll javadocs state that we sort the results here, so we don't let filesystem
     // specifics leak out of this abstraction:
     Arrays.sort(array);
@@ -214,7 +213,7 @@
     maybeDeletePendingFiles();
     // If this file was pending delete, we are now bringing it back to life:
     if (pendingDeletes.remove(name)) {
-      privateDeleteFile(name, true); // try again to delete it - this is best effort
+      privateDeleteFile(name, true); // try again to delete it - this is best-effort
       pendingDeletes.remove(name); // watch out - if the delete fails it put
     }
     return new FSIndexOutput(name);
@@ -266,8 +265,8 @@
     }
     maybeDeletePendingFiles();
     if (pendingDeletes.remove(dest)) {
-      privateDeleteFile(dest, true); // try again to delete it - this is best effort
-      pendingDeletes.remove(dest); // watch out if the delete fails it's back in here.
+      privateDeleteFile(dest, true); // try again to delete it - this is best-effort
+      pendingDeletes.remove(dest); // watch out: if the delete fails, it's back in here
     }
     Files.move(directory.resolve(source), directory.resolve(dest), StandardCopyOption.ATOMIC_MOVE);
   }
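
For context, `ConcurrentHashMap.newKeySet()` (JDK 8+) is equivalent to `Collections.newSetFromMap(new ConcurrentHashMap<>())`: a concurrent `Set` view backed by a `ConcurrentHashMap`. A small sketch under that assumption (names are illustrative):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    public class KeySetDemo {
      public static void main(String[] args) {
        // Same semantics as Collections.newSetFromMap(new ConcurrentHashMap<>()),
        // without spelling out the Boolean-valued backing map.
        Set<String> pendingDeletes = ConcurrentHashMap.newKeySet();
        pendingDeletes.add("_0.cfs");
        System.out.println(pendingDeletes.remove("_0.cfs")); // true
      }
    }
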
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java
index 3e1f427..8a590bc 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java
@@ -16,7 +16,7 @@
  */
 package org.apache.lucene.analysis.tokenattributes;
 
-import java.util.stream.Stream;
+import java.util.Arrays;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.BytesRef;
@@ -49,7 +49,7 @@
   public void testLucene9856() {
     assertTrue(
         "BytesTermAttributeImpl must explicitly declare to implement TermToBytesRefAttribute",
-        Stream.of(BytesTermAttributeImpl.class.getInterfaces())
-            .anyMatch(TermToBytesRefAttribute.class::equals));
+        Arrays.asList(BytesTermAttributeImpl.class.getInterfaces())
+            .contains(TermToBytesRefAttribute.class));
   }
 }
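
The interface check above now goes through `Arrays.asList(...).contains(...)` rather than `Stream.of(...).anyMatch(X.class::equals)`; both test array membership by reference equality of `Class` objects. An illustrative sketch (the `Marker`/`Impl` types are made up for the example):

    import java.util.Arrays;

    public class ContainsDemo {
      interface Marker {}

      static class Impl implements Marker {}

      public static void main(String[] args) {
        // getInterfaces() lists only directly declared interfaces, and
        // contains() expresses the membership test without a stream.
        boolean declares = Arrays.asList(Impl.class.getInterfaces()).contains(Marker.class);
        System.out.println(declares); // true
      }
    }
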
diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java b/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java
index 71d9291..510d68d 100644
--- a/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java
+++ b/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java
@@ -128,7 +128,7 @@
       if (values.length > 0 && random().nextBoolean()) {
         values[values.length / 2] = Long.MIN_VALUE;
       }
-      Set<Long> set1 = LongStream.of(values).mapToObj(Long::valueOf).collect(Collectors.toSet());
+      Set<Long> set1 = LongStream.of(values).boxed().collect(Collectors.toSet());
       Arrays.sort(values);
       LongHashSet set2 = new LongHashSet(values);
       assertEquals(set1, set2);
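
`boxed()` is the idiomatic bridge from `LongStream` to `Stream<Long>`; `mapToObj(Long::valueOf)` produces the same elements more verbosely. A minimal sketch:

    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.LongStream;

    public class BoxedDemo {
      public static void main(String[] args) {
        // boxed() is equivalent to mapToObj(Long::valueOf) on a LongStream.
        Set<Long> set = LongStream.of(3L, 1L, 2L).boxed().collect(Collectors.toSet());
        System.out.println(set.contains(2L)); // true
      }
    }
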
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
index 95964ff..06c4b5c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -28,7 +28,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
@@ -412,14 +411,12 @@
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
     expectThrows(
         IllegalArgumentException.class,
-        () -> {
-          cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 3);
-        });
+        () ->
+            cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 3));
     expectThrows(
         IllegalArgumentException.class,
-        () -> {
-          cms.setMaxMergesAndThreads(3, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
-        });
+        () ->
+            cms.setMaxMergesAndThreads(3, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS));
   }
 
   public void testLiveMaxMergeCount() throws Exception {
@@ -653,8 +650,7 @@
 
           @Override
           public boolean isEnabled(String component) {
-            if (component.equals("MS")) return true;
-            return false;
+            return component.equals("MS");
           }
         });
     iwc.setMaxBufferedDocs(2);
@@ -681,9 +677,7 @@
     for (Thread t : mergeThreadSet) {
       String name = t.getName();
       List<String> threadMsgs =
-          messages.stream()
-              .filter(line -> line.startsWith("merge thread " + name))
-              .collect(Collectors.toList());
+          messages.stream().filter(line -> line.startsWith("merge thread " + name)).toList();
       assertTrue(
           "Expected:·a·value·equal·to·or·greater·than·3,·got:"
               + threadMsgs.size()
@@ -736,15 +730,13 @@
 
     expectThrows(
         IllegalArgumentException.class,
-        () -> {
-          cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 4);
-        });
+        () ->
+            cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 4));
 
     expectThrows(
         IllegalArgumentException.class,
-        () -> {
-          cms.setMaxMergesAndThreads(4, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS);
-        });
+        () ->
+            cms.setMaxMergesAndThreads(4, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS));
 
     cms.setMaxMergesAndThreads(
         ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS,
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
index 4f6da4a..3916697 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
@@ -23,7 +23,6 @@
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
@@ -75,7 +74,7 @@
  * <p>Each per-segment index lives in a private directory next to the main index, and they are
  * deleted once their segments are removed from the index. They are "volatile", meaning if e.g. the
  * index is replicated to another machine, it's OK to not copy parallel segments indices, since they
- * will just be regnerated (at a cost though).
+ * will just be regenerated (at a cost though).
  */
 
 // @SuppressSysoutChecks(bugUrl="we print stuff")
@@ -97,8 +96,7 @@
     private final Path segsPath;
 
     /** Which segments have been closed, but their parallel index is not yet removed. */
-    private final Set<SegmentIDAndGen> closedSegments =
-        Collections.newSetFromMap(new ConcurrentHashMap<SegmentIDAndGen, Boolean>());
+    private final Set<SegmentIDAndGen> closedSegments = ConcurrentHashMap.newKeySet();
 
     /** Holds currently open parallel readers for each segment. */
     private final Map<SegmentIDAndGen, LeafReader> parallelReaders = new ConcurrentHashMap<>();
@@ -154,8 +152,8 @@
     protected abstract IndexWriterConfig getIndexWriterConfig() throws IOException;
 
     /**
-     * Optional method to validate that the provided parallell reader in fact reflects the changes
-     * in schemaGen.
+     * Optional method to validate that the provided parallel reader in fact reflects the changes in
+     * schemaGen.
      */
     protected void checkParallelReader(LeafReader reader, LeafReader parallelReader, long schemaGen)
         throws IOException {}
@@ -287,7 +285,7 @@
 
     // Make sure we deleted all parallel indices for segments that are no longer in the main index:
     private void assertNoExtraSegments() throws IOException {
-      Set<String> liveIDs = new HashSet<String>();
+      Set<String> liveIDs = new HashSet<>();
       for (SegmentCommitInfo info : SegmentInfos.readLatestCommit(indexDir)) {
         String idString = StringHelper.idToString(info.info.getId());
         liveIDs.add(idString);
@@ -585,7 +583,7 @@
       SegmentInfos lastCommit = SegmentInfos.readLatestCommit(indexDir);
       if (DEBUG) System.out.println("TEST: prune");
 
-      Set<String> liveIDs = new HashSet<String>();
+      Set<String> liveIDs = new HashSet<>();
       for (SegmentCommitInfo info : lastCommit) {
         String idString = StringHelper.idToString(info.info.getId());
         liveIDs.add(idString);
@@ -790,7 +788,7 @@
           throws IOException {
         IndexWriterConfig iwc = newIndexWriterConfig();
 
-        // The order of our docIDs must precisely matching incoming reader:
+        // The order of our docIDs must precisely match the incoming reader:
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
@@ -847,7 +845,7 @@
           throws IOException {
         IndexWriterConfig iwc = newIndexWriterConfig();
 
-        // The order of our docIDs must precisely matching incoming reader:
+        // The order of our docIDs must precisely match the incoming reader:
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
@@ -957,7 +955,7 @@
         tmp.setFloorSegmentMB(.01);
         iwc.setMergePolicy(tmp);
         if (TEST_NIGHTLY) {
-          // during nightly tests, we might use too many files if we arent careful
+          // during nightly tests, we might use too many files if we aren't careful
           iwc.setUseCompoundFile(true);
         }
         return iwc;
@@ -977,7 +975,7 @@
           throws IOException {
         IndexWriterConfig iwc = newIndexWriterConfig();
 
-        // The order of our docIDs must precisely matching incoming reader:
+        // The order of our docIDs must precisely match the incoming reader:
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
@@ -1596,14 +1594,7 @@
         assertTrue(value <= max);
       }
 
-      Arrays.sort(
-          hits.scoreDocs,
-          new Comparator<ScoreDoc>() {
-            @Override
-            public int compare(ScoreDoc a, ScoreDoc b) {
-              return a.doc - b.doc;
-            }
-          });
+      Arrays.sort(hits.scoreDocs, Comparator.comparingInt(a -> a.doc));
 
       NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
       for (ScoreDoc hit : hits.scoreDocs) {
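
`Comparator.comparingInt` replaces the anonymous subtraction-based comparator above; it compares via `Integer.compare`, which also sidesteps the overflow risk of `a.doc - b.doc` for extreme values (doc IDs are non-negative, so here it is a readability win rather than a bug fix). An illustrative sketch using a made-up record:

    import java.util.Arrays;
    import java.util.Comparator;

    public class ComparingIntDemo {
      record Hit(int doc) {}

      public static void main(String[] args) {
        Hit[] hits = {new Hit(7), new Hit(2), new Hit(5)};
        // Key-extractor comparator; sorts by doc ascending via Integer.compare.
        Arrays.sort(hits, Comparator.comparingInt(Hit::doc));
        System.out.println(Arrays.toString(hits)); // [Hit[doc=2], Hit[doc=5], Hit[doc=7]]
      }
    }
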
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index c1687ac..d897ea4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -863,7 +863,7 @@
     dir.close();
   }
 
-  private class IndexerThreadInterrupt extends Thread {
+  private static class IndexerThreadInterrupt extends Thread {
     volatile boolean failed;
     volatile boolean finish;
 
@@ -4267,7 +4267,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig();
     iwc.setMergePolicy(
         new FilterMergePolicy(newMergePolicy()) {
-          boolean keepFullyDeletedSegment = random().nextBoolean();
+          final boolean keepFullyDeletedSegment = random().nextBoolean();
 
           @Override
           public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
@@ -4693,7 +4693,7 @@
             List<SegmentCommitInfo> fullyDeletedSegments =
                 segmentInfos.asList().stream()
                     .filter(s -> s.info.maxDoc() - s.getDelCount() == 0)
-                    .collect(Collectors.toList());
+                    .toList();
             if (fullyDeletedSegments.isEmpty()) {
               return null;
             }
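
Marking nested classes like `IndexerThreadInterrupt` static drops the implicit reference to the enclosing test instance; for long-lived `Thread` subclasses that hidden reference would otherwise keep the outer object reachable. A small sketch of the pattern (names are illustrative):

    public class StaticNestedDemo {
      // A nested class that never uses the enclosing instance should be static:
      // no hidden outer-instance field, and it can be created without an outer object.
      private static class Worker extends Thread {
        @Override
        public void run() {
          System.out.println("no outer instance captured");
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Thread t = new Worker();
        t.start();
        t.join();
      }
    }
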
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index f70f14a..51b502a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -18,7 +18,6 @@
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
 import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween;
-import static java.util.stream.Collectors.toList;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -410,7 +409,7 @@
 
     addDirThreads.close(true);
 
-    assertTrue(addDirThreads.failures.size() == 0);
+    assertTrue(addDirThreads.failures.isEmpty());
 
     TestUtil.checkIndex(mainDir);
 
@@ -736,11 +735,7 @@
     IndexSearcher searcher = newSearcher(r);
     assertEquals(100, searcher.count(q));
 
-    expectThrows(
-        AlreadyClosedException.class,
-        () -> {
-          DirectoryReader.openIfChanged(r);
-        });
+    expectThrows(AlreadyClosedException.class, () -> DirectoryReader.openIfChanged(r));
 
     r.close();
     dir1.close();
@@ -769,7 +764,7 @@
     DirectoryReader r = DirectoryReader.open(writer);
 
     final int numIterations = 10;
-    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<>());
 
     // Only one thread can addIndexes at a time, because
     // IndexWriter acquires a write lock in each directory:
@@ -812,8 +807,8 @@
       }
     }
 
-    for (int i = 0; i < threads.length; i++) {
-      threads[i].join();
+    for (Thread thread : threads) {
+      thread.join();
     }
     // final check
     DirectoryReader r2 = DirectoryReader.openIfChanged(r);
@@ -853,7 +848,7 @@
         newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(2));
     if (TEST_NIGHTLY) {
       // if we have a ton of iterations we need to make sure we don't do unnecessary
-      // extra flushing otherwise we will timeout on nightly
+      // extra flushing, otherwise we will time out on nightly
       iwc.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
       iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     }
@@ -1148,11 +1143,7 @@
     // other NRT reader, since it is already marked closed!
     for (int i = 0; i < 2; i++) {
       shouldFail.set(true);
-      expectThrows(
-          FakeIOException.class,
-          () -> {
-            DirectoryReader.open(writer).close();
-          });
+      expectThrows(FakeIOException.class, () -> DirectoryReader.open(writer).close());
     }
 
     writer.close();
@@ -1214,7 +1205,7 @@
     final long MISSING_VALUE =
         ASC_SORT ? Long.MAX_VALUE : Long.MIN_VALUE; // missing values at the end
 
-    // create a comparator that sort leaf readers according with
+    // create a comparator that sorts leaf readers according to
     // the min value (asc sort) or max value (desc sort) of its points
     Comparator<LeafReader> leafSorter =
         Comparator.comparingLong(
@@ -1347,13 +1338,9 @@
   // provided leafSorter
   private static void assertLeavesSorted(
       DirectoryReader reader, Comparator<LeafReader> leafSorter) {
-    List<LeafReader> lrs =
-        reader.leaves().stream().map(LeafReaderContext::reader).collect(toList());
+    List<LeafReader> lrs = reader.leaves().stream().map(LeafReaderContext::reader).toList();
     List<LeafReader> expectedSortedlrs =
-        reader.leaves().stream()
-            .map(LeafReaderContext::reader)
-            .sorted(leafSorter)
-            .collect(toList());
+        reader.leaves().stream().map(LeafReaderContext::reader).sorted(leafSorter).toList();
     assertEquals(expectedSortedlrs, lrs);
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
index 9c9d39f..a6bcea3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
@@ -20,7 +20,7 @@
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -257,21 +257,19 @@
           RandomStrings.randomRealisticUnicodeOfCodepointLengthBetween(random(), 1, 10);
       postingMap.putIfAbsent(newBytesRef(randomString), new Posting());
     }
-    List<BytesRef> bytesRefs = Arrays.asList(postingMap.keySet().toArray(new BytesRef[0]));
+    List<BytesRef> bytesRefs = new ArrayList<>(postingMap.keySet());
     Collections.sort(bytesRefs);
     int numDocs = 1 + random().nextInt(200);
     int termOrd = 0;
-    for (int i = 0; i < numDocs; i++) {
+    for (int doc = 0; doc < numDocs; doc++) {
       int numTerms = 1 + random().nextInt(200);
-      int doc = i;
       for (int j = 0; j < numTerms; j++) {
         BytesRef ref = RandomPicks.randomFrom(random(), bytesRefs);
         Posting posting = postingMap.get(ref);
         if (posting.termId == -1) {
           posting.termId = termOrd++;
         }
-        posting.docAndFreq.putIfAbsent(doc, 0);
-        posting.docAndFreq.compute(doc, (key, oldVal) -> oldVal + 1);
+        posting.docAndFreq.merge(doc, 1, Integer::sum);
         hash.add(ref, doc);
       }
       hash.finish();
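
The `putIfAbsent` + `compute` pair above collapses into a single `Map.merge` call: insert the given value when the key is absent, otherwise combine it with the existing one. A minimal sketch:

    import java.util.HashMap;
    import java.util.Map;

    public class MergeDemo {
      public static void main(String[] args) {
        Map<Integer, Integer> docAndFreq = new HashMap<>();
        // merge(doc, 1, Integer::sum): put 1 on first sight, otherwise add 1.
        for (int doc : new int[] {1, 1, 2}) {
          docAndFreq.merge(doc, 1, Integer::sum);
        }
        System.out.println(docAndFreq); // {1=2, 2=1}
      }
    }
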
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
index fddeb96..905263c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
@@ -42,7 +42,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.IntConsumer;
-import java.util.stream.Collectors;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.LongPoint;
@@ -198,7 +197,7 @@
                                         .map(
                                             filterCollector ->
                                                 (DummyTotalHitCountCollector) filterCollector.in)
-                                        .collect(Collectors.toList()));
+                                        .toList());
                               }
                             });
                     assertEquals(totalHits2, totalHits1);
@@ -555,7 +554,7 @@
   }
 
   /** DummyQuery with Accountable, pretending to be a memory-eating query */
-  private class AccountableDummyQuery extends DummyQuery implements Accountable {
+  private static class AccountableDummyQuery extends DummyQuery implements Accountable {
 
     @Override
     public long ramBytesUsed() {
@@ -841,7 +840,7 @@
           @Override
           protected void onHit(Object readerCoreKey, Query query) {
             super.onHit(readerCoreKey, query);
-            switch (indexId.get(readerCoreKey).intValue()) {
+            switch (indexId.get(readerCoreKey)) {
               case 1:
                 hitCount1.incrementAndGet();
                 break;
@@ -856,7 +855,7 @@
           @Override
           protected void onMiss(Object readerCoreKey, Query query) {
             super.onMiss(readerCoreKey, query);
-            switch (indexId.get(readerCoreKey).intValue()) {
+            switch (indexId.get(readerCoreKey)) {
               case 1:
                 missCount1.incrementAndGet();
                 break;
@@ -1331,11 +1330,7 @@
     public void onUse(final Query query) {
       AtomicInteger count;
       synchronized (counts) {
-        count = counts.get(query);
-        if (count == null) {
-          count = new AtomicInteger();
-          counts.put(query, count);
-        }
+        count = counts.computeIfAbsent(query, k -> new AtomicInteger());
       }
       count.incrementAndGet();
     }
@@ -1388,8 +1383,8 @@
     weight = new WeightWrapper(weight, scorerCalled, bulkScorerCalled);
     weight = cache.doCache(weight, NEVER_CACHE);
     weight.bulkScorer(leaf);
-    assertEquals(true, bulkScorerCalled.get());
-    assertEquals(false, scorerCalled.get());
+    assertTrue(bulkScorerCalled.get());
+    assertFalse(scorerCalled.get());
     assertEquals(0, cache.getCacheCount());
 
     searcher.getIndexReader().close();
@@ -1779,7 +1774,7 @@
     assertEquals(2, searcher.count(query));
     assertEquals(2, query.scorerCreatedCount.get()); // both segments cached
 
-    w.updateNumericDocValue(new Term("text", "text"), "field", 2l);
+    w.updateNumericDocValue(new Term("text", "text"), "field", 2L);
     reader.close();
     reader = DirectoryReader.open(w);
     searcher =
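
`computeIfAbsent` replaces the get / null-check / put sequence in `onUse` with one call that returns the existing or newly created counter. A minimal sketch:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ComputeIfAbsentDemo {
      public static void main(String[] args) {
        Map<String, AtomicInteger> counts = new HashMap<>();
        // Creates the counter on first access, reuses it afterwards.
        counts.computeIfAbsent("query", k -> new AtomicInteger()).incrementAndGet();
        counts.computeIfAbsent("query", k -> new AtomicInteger()).incrementAndGet();
        System.out.println(counts.get("query").get()); // 2
      }
    }
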
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java
index 27543e1..b25f9af 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java
@@ -25,7 +25,6 @@
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.function.Predicate;
-import java.util.stream.Collectors;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -53,10 +52,8 @@
     for (int iter = 0; iter < 100; iter++) {
       int docs = RandomNumbers.randomIntBetween(random(), 1000, 10000);
       SortedSet<Integer> expected = generateDocIds(docs, random());
-      List<Integer> expectedEven =
-          expected.stream().filter(evenPredicate).collect(Collectors.toList());
-      List<Integer> expectedOdd =
-          expected.stream().filter(oddPredicate).collect(Collectors.toList());
+      List<Integer> expectedEven = expected.stream().filter(evenPredicate).toList();
+      List<Integer> expectedOdd = expected.stream().filter(oddPredicate).toList();
 
       // Test only wrapping one of the collector managers:
       MultiCollectorManager mcm = new MultiCollectorManager(cm1);
@@ -291,7 +288,7 @@
 
   private static CollectorManager<?, ?> collectorManager(
       ScoreMode scoreMode, Class<?> expectedScorer) {
-    return new CollectorManager<Collector, Object>() {
+    return new CollectorManager<>() {
 
       @Override
       public Collector newCollector() throws IOException {
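
Since Java 9 the diamond operator also works on anonymous classes, which is why `new CollectorManager<Collector, Object>() { ... }` can become `new CollectorManager<>() { ... }`. A sketch of the same inference with a standard functional interface:

    import java.util.function.Supplier;

    public class AnonymousDiamondDemo {
      public static void main(String[] args) {
        // Type arguments of the anonymous class are inferred from the target type.
        Supplier<String> s = new Supplier<>() {
          @Override
          public String get() {
            return "inferred";
          }
        };
        System.out.println(s.get());
      }
    }
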
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
index ec84478..bd954f1 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
@@ -46,7 +46,7 @@
     if (hasWorkingMMapOnWindows()) {
       dirs0.add(new MMapDirectory(path));
     }
-    final FSDirectory[] dirs = dirs0.stream().toArray(FSDirectory[]::new);
+    final FSDirectory[] dirs = dirs0.toArray(FSDirectory[]::new);
 
     for (int i = 0; i < dirs.length; i++) {
       FSDirectory dir = dirs[i];
@@ -58,8 +58,7 @@
       out.writeBytes(largeBuffer, largeBuffer.length);
       out.close();
 
-      for (int j = 0; j < dirs.length; j++) {
-        FSDirectory d2 = dirs[j];
+      for (FSDirectory d2 : dirs) {
         d2.ensureOpen();
         assertTrue(slowFileExists(d2, fname));
         assertEquals(1 + largeBuffer.length, d2.fileLength(fname));
@@ -84,19 +83,14 @@
       // delete with a different dir
       dirs[(i + 1) % dirs.length].deleteFile(fname);
 
-      for (int j = 0; j < dirs.length; j++) {
-        FSDirectory d2 = dirs[j];
+      for (FSDirectory d2 : dirs) {
         assertFalse(slowFileExists(d2, fname));
       }
 
       Lock lock = dir.obtainLock(lockname);
 
       for (Directory other : dirs) {
-        expectThrows(
-            LockObtainFailedException.class,
-            () -> {
-              other.obtainLock(lockname);
-            });
+        expectThrows(LockObtainFailedException.class, () -> other.obtainLock(lockname));
       }
 
       lock.close();
@@ -106,8 +100,7 @@
       lock.close();
     }
 
-    for (int i = 0; i < dirs.length; i++) {
-      FSDirectory dir = dirs[i];
+    for (FSDirectory dir : dirs) {
       dir.ensureOpen();
       dir.close();
       assertFalse(dir.isOpen);
@@ -117,18 +110,11 @@
   // LUCENE-1468
   public void testNotDirectory() throws Throwable {
     Path path = createTempDir("testnotdir");
-    Directory fsDir = new NIOFSDirectory(path);
-    try {
+    try (Directory fsDir = new NIOFSDirectory(path)) {
       IndexOutput out = fsDir.createOutput("afile", newIOContext(random()));
       out.close();
       assertTrue(slowFileExists(fsDir, "afile"));
-      expectThrows(
-          IOException.class,
-          () -> {
-            new NIOFSDirectory(path.resolve("afile"));
-          });
-    } finally {
-      fsDir.close();
+      expectThrows(IOException.class, () -> new NIOFSDirectory(path.resolve("afile")));
     }
   }
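
The `testNotDirectory` change swaps an explicit try/finally `close()` for try-with-resources, which closes the directory on every exit path, exceptions included. A small standalone sketch of the shape (using `Files.list` as the closeable resource):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class TryWithResourcesDemo {
      public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("twr");
        // The stream is closed automatically, replacing try { ... } finally { close(); }.
        try (var entries = Files.list(dir)) {
          System.out.println(entries.count()); // 0
        }
      }
    }
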
 
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
index 2298ef8..aff4fe2 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
@@ -71,23 +71,23 @@
     // we should see only fdx,fdt files here
     String[] files = primaryDir.listAll();
     assertTrue(files.length > 0);
-    for (int x = 0; x < files.length; x++) {
-      String ext = FileSwitchDirectory.getExtension(files[x]);
+    for (String file : files) {
+      String ext = FileSwitchDirectory.getExtension(file);
       assertTrue(fileExtensions.contains(ext));
     }
     files = secondaryDir.listAll();
     assertTrue(files.length > 0);
     // we should not see fdx,fdt files here
-    for (int x = 0; x < files.length; x++) {
-      String ext = FileSwitchDirectory.getExtension(files[x]);
+    for (String file : files) {
+      String ext = FileSwitchDirectory.getExtension(file);
       assertFalse(fileExtensions.contains(ext));
     }
     reader.close();
     writer.close();
 
     files = fsd.listAll();
-    for (int i = 0; i < files.length; i++) {
-      assertNotNull(files[i]);
+    for (String file : files) {
+      assertNotNull(file);
     }
     fsd.close();
   }
@@ -110,12 +110,8 @@
   public void testNoDir() throws Throwable {
     Path primDir = createTempDir("foo");
     Path secondDir = createTempDir("bar");
-    Directory dir = newFSSwitchDirectory(primDir, secondDir, Collections.<String>emptySet());
-    expectThrows(
-        IndexNotFoundException.class,
-        () -> {
-          DirectoryReader.open(dir);
-        });
+    Directory dir = newFSSwitchDirectory(primDir, secondDir, Collections.emptySet());
+    expectThrows(IndexNotFoundException.class, () -> DirectoryReader.open(dir));
 
     dir.close();
   }
@@ -151,7 +147,7 @@
 
   @Override
   protected Directory getDirectory(Path path) throws IOException {
-    Set<String> extensions = new HashSet<String>();
+    Set<String> extensions = new HashSet<>();
     if (random().nextBoolean()) {
       extensions.add("cfs");
     }
@@ -187,8 +183,7 @@
             true)) {
       dir.createOutput("foo.tim", IOContext.DEFAULT).close();
       Function<String[], Long> stripExtra =
-          array ->
-              Arrays.asList(array).stream().filter(f -> f.startsWith("extra") == false).count();
+          array -> Arrays.stream(array).filter(f -> f.startsWith("extra") == false).count();
       try (IndexInput indexInput = dir.openInput("foo.tim", IOContext.DEFAULT)) {
         assert indexInput != null;
         dir.deleteFile("foo.tim");
diff --git a/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java b/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java
index b6f426f..577f0f9 100644
--- a/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java
+++ b/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java
@@ -218,7 +218,7 @@
               }
 
               final int runtimeVersion = Runtime.version().feature();
-              if (jarVersions.contains(Integer.valueOf(runtimeVersion))) {
+              if (jarVersions.contains(runtimeVersion)) {
                 Assertions.assertThat(
                         loader.loadClass("org.apache.lucene.store.MemorySegmentIndexInput"))
                     .isNotNull();
@@ -231,11 +231,10 @@
   public void testAllCoreModulesAreNamedModules() {
     Assertions.assertThat(allLuceneModules)
         .allSatisfy(
-            module -> {
-              Assertions.assertThat(module.descriptor().isAutomatic())
-                  .as(module.descriptor().name())
-                  .isFalse();
-            });
+            module ->
+                Assertions.assertThat(module.descriptor().isAutomatic())
+                    .as(module.descriptor().name())
+                    .isFalse());
   }
 
   /** Ensure all modules have the same (expected) version. */
@@ -283,9 +282,7 @@
     try (ModuleReader reader = module.open();
         Stream<String> entryStream = reader.list()) {
       List<String> serviceProviderEntryList =
-          entryStream
-              .filter(entry -> serviceEntryPattern.matcher(entry).find())
-              .collect(Collectors.toList());
+          entryStream.filter(entry -> serviceEntryPattern.matcher(entry).find()).toList();
 
       for (String entry : serviceProviderEntryList) {
         List<String> implementations;
@@ -294,7 +291,7 @@
               Arrays.stream(new String(is.readAllBytes(), StandardCharsets.UTF_8).split("\n"))
                   .map(String::trim)
                   .filter(line -> !line.isBlank() && !line.startsWith("#"))
-                  .collect(Collectors.toList());
+                  .toList();
         }
 
         Matcher matcher = serviceEntryPattern.matcher(entry);
@@ -344,10 +341,7 @@
 
       if (module.descriptor().name().equals("org.apache.lucene.core")) {
         // Internal packages should not be exported to unqualified targets.
-        jarPackages.removeIf(
-            entry -> {
-              return entry.startsWith("org.apache.lucene.internal");
-            });
+        jarPackages.removeIf(entry -> entry.startsWith("org.apache.lucene.internal"));
 
         // Internal packages should use qualified exports.
         moduleExports.removeIf(
@@ -364,11 +358,10 @@
       Assertions.assertThat(moduleExports)
           .as("Exported packages in module: " + module.descriptor().name())
           .allSatisfy(
-              export -> {
-                Assertions.assertThat(export.targets())
-                    .as("We only support unqualified exports for now?")
-                    .isEmpty();
-              })
+              export ->
+                  Assertions.assertThat(export.targets())
+                      .as("We only support unqualified exports for now?")
+                      .isEmpty())
           .map(ModuleDescriptor.Exports::source)
           .containsExactlyInAnyOrderElementsOf(jarPackages);
     }
@@ -392,11 +385,10 @@
       Assertions.assertThat(moduleOpens)
           .as("Open packages in module: " + module.descriptor().name())
           .allSatisfy(
-              export -> {
-                Assertions.assertThat(export.targets())
-                    .as("Opens should only be targeted to Lucene Core.")
-                    .containsExactly("org.apache.lucene.core");
-              })
+              export ->
+                  Assertions.assertThat(export.targets())
+                      .as("Opens should only be targeted to Lucene Core.")
+                      .containsExactly("org.apache.lucene.core"))
           .map(ModuleDescriptor.Opens::source)
           .containsExactlyInAnyOrderElementsOf(jarPackages);
     }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
index da4bb4f..0140c35 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
@@ -29,7 +29,6 @@
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.stream.Collectors;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -211,7 +210,7 @@
     DrillDownQuery ddq = new DrillDownQuery(config);
     ddq.add("Color", "Blue");
 
-    // Setup an IndexSearcher that will try to cache queries aggressively:
+    // Set up an IndexSearcher that will try to cache queries aggressively:
     IndexSearcher searcher = getNewSearcher(writer.getReader());
     searcher.setQueryCachingPolicy(
         new QueryCachingPolicy() {
@@ -224,7 +223,7 @@
           }
         });
 
-    // Setup a DS instance for searching:
+    // Set up a DS instance for searching:
     TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
     DrillSideways ds = getNewDrillSideways(searcher, config, taxoReader);
 
@@ -250,10 +249,7 @@
 
     // test getTopChildren(0, dim)
     expectThrows(
-        IllegalArgumentException.class,
-        () -> {
-          concurrentResult.facets.getTopChildren(0, "Color");
-        });
+        IllegalArgumentException.class, () -> concurrentResult.facets.getTopChildren(0, "Color"));
 
     writer.close();
     IOUtils.close(searcher.getIndexReader(), taxoReader, taxoWriter, dir, taxoDir);
@@ -491,11 +487,7 @@
 
     // test getAllDims(0)
     DrillSidewaysResult finalR1 = r;
-    expectThrows(
-        IllegalArgumentException.class,
-        () -> {
-          finalR1.facets.getAllDims(0);
-        });
+    expectThrows(IllegalArgumentException.class, () -> finalR1.facets.getAllDims(0));
 
     // More interesting case: drill-down on two fields
     ddq = new DrillDownQuery(config);
@@ -585,11 +577,7 @@
 
     // test getTopChildren(0, dim)
     DrillSidewaysResult finalR = r;
-    expectThrows(
-        IllegalArgumentException.class,
-        () -> {
-          finalR.facets.getTopChildren(0, "Author");
-        });
+    expectThrows(IllegalArgumentException.class, () -> finalR.facets.getTopChildren(0, "Author"));
   }
 
   public void testBasicWithCollectorManager() throws Exception {
@@ -1003,7 +991,7 @@
     int[] dims;
 
     // 2nd value per dim for the doc (so we test
-    // multi-valued fields):
+    // multivalued fields):
     int[] dims2;
     boolean deleted;
 
@@ -1084,7 +1072,7 @@
           values.add(s);
         }
       }
-      dimValues[dim] = values.toArray(new String[values.size()]);
+      dimValues[dim] = values.toArray(new String[0]);
       valueCount *= 2;
     }
 
@@ -1657,7 +1645,7 @@
           .sorted(comparator)
           .map(cr -> new DocAndScore(cr.docAndScore))
           .limit(numDocs)
-          .collect(Collectors.toList());
+          .toList();
     }
   }
 
@@ -1927,8 +1915,7 @@
         if (VERBOSE) {
           idx = 0;
           System.out.println("      expected (sorted)");
-          for (int i = 0; i < topNIDs.length; i++) {
-            int expectedOrd = topNIDs[i];
+          for (int expectedOrd : topNIDs) {
             String value = dimValues[dim][expectedOrd];
             System.out.println(
                 "        "
@@ -2113,11 +2100,7 @@
         topNDimsResult.get(0).toString());
 
     // test getAllDims(0)
-    expectThrows(
-        IllegalArgumentException.class,
-        () -> {
-          facets.getAllDims(0);
-        });
+    expectThrows(IllegalArgumentException.class, () -> facets.getAllDims(0));
     // More interesting case: drill-down on two fields
     ddq = new DrillDownQuery(config);
     ddq.add("Author", "Lisa");
@@ -2140,7 +2123,7 @@
   }
 
   public void testScorer() throws Exception {
-    // LUCENE-6001 some scorers, eg ReqExlScorer, can hit NPE if cost is called after nextDoc
+    // LUCENE-6001 some scorers, e.g. ReqExclScorer, can hit NPE if cost is called after nextDoc
     Directory dir = newDirectory();
     Directory taxoDir = newDirectory();
 
@@ -2235,7 +2218,7 @@
       facets = new MultiFacets(drillSidewaysFacets, drillDownFacets);
     }
 
-    // Facets computed using FacetsCollecter exposed in DrillSidewaysResult
+    // Facets computed using FacetsCollector exposed in DrillSidewaysResult
     // should match the Facets computed by {@link DrillSideways#buildFacetsResult}
     FacetResult facetResultActual = facets.getTopChildren(2, "dim");
     FacetResult facetResultExpected = r.facets.getTopChildren(2, "dim");
@@ -2251,7 +2234,7 @@
 
   @Test
   public void testDrillSidewaysSearchUseCorrectIterator() throws Exception {
-    // This test reproduces an issue (see github #12211) where DrillSidewaysScorer would ultimately
+    // This test reproduces an issue (see GitHub #12211) where DrillSidewaysScorer would ultimately
     // cause multiple consecutive calls to TwoPhaseIterator::matches, which results in a failed
     // assert in the PostingsReaderBase implementation (or a failing to match a document that should
     // have matched, if asserts are disabled).
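
On the `toArray(new String[0])` changes in this file: passing an empty array is the preferred idiom on modern JVMs; it is at least as fast as pre-sizing with `values.size()` and avoids repeating the size expression. A minimal sketch:

    import java.util.List;

    public class ToArrayDemo {
      public static void main(String[] args) {
        List<String> values = List.of("a", "b", "c");
        // The runtime allocates a correctly sized array when the argument is too small.
        String[] array = values.toArray(new String[0]);
        System.out.println(array.length); // 3
      }
    }
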
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java b/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java
index 3fbb865..9b72e0d 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java
@@ -20,7 +20,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
@@ -414,11 +413,7 @@
 
   private static Query createFastMatchQuery(String field, int... values) {
     return new TermInSetQuery(
-        field,
-        Arrays.stream(values)
-            .mapToObj(String::valueOf)
-            .map(BytesRef::new)
-            .collect(Collectors.toList()));
+        field, Arrays.stream(values).mapToObj(String::valueOf).map(BytesRef::new).toList());
   }
 
   private static void addFastMatchField(
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
index f64b551..4a8a537 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java
@@ -30,7 +30,6 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
@@ -112,11 +111,9 @@
               new String[0],
               3,
               -1,
-              new LabelAndValue[] {
-                new LabelAndValue("bar", 1),
-                new LabelAndValue("foo", 2),
-                new LabelAndValue("zoo", 1)
-              });
+              new LabelAndValue("bar", 1),
+              new LabelAndValue("foo", 2),
+              new LabelAndValue("zoo", 1));
 
           // test getAllDims
           List<FacetResult> results = facets.getAllDims(10);
@@ -154,11 +151,7 @@
               topDimsResults1.get(0).toString());
 
           // test getTopDims(0)
-          expectThrows(
-              IllegalArgumentException.class,
-              () -> {
-                facets.getAllDims(0);
-              });
+          expectThrows(IllegalArgumentException.class, () -> facets.getAllDims(0));
 
           // test getSpecificValue
           assertEquals(2, facets.getSpecificValue("a", "foo"));
@@ -362,7 +355,7 @@
         try {
           Facets facets = getAllFacets(searcher, state, exec);
 
-          // since a is not set to be hierarchical but _is_ multi-valued, we expect a value of 2
+          // since a is not set to be hierarchical but _is_ multivalued, we expect a value of 2
           // (since two unique docs contain at least one value for this dim):
           assertEquals(
               "dim=a path=[] value=2 childCount=3\n  foo (2)\n  bar (1)\n  zoo (1)\n",
@@ -383,11 +376,9 @@
               new String[0],
               3,
               2,
-              new LabelAndValue[] {
-                new LabelAndValue("bar", 1),
-                new LabelAndValue("foo", 2),
-                new LabelAndValue("zoo", 1)
-              });
+              new LabelAndValue("bar", 1),
+              new LabelAndValue("foo", 2),
+              new LabelAndValue("zoo", 1));
 
           assertFacetResult(
               facets.getAllChildren("c", "buzz"),
@@ -395,11 +386,9 @@
               new String[] {"buzz"},
               3,
               2,
-              new LabelAndValue[] {
-                new LabelAndValue("bee", 1),
-                new LabelAndValue("bif", 2),
-                new LabelAndValue("biz", 1)
-              });
+              new LabelAndValue("bee", 1),
+              new LabelAndValue("bif", 2),
+              new LabelAndValue("biz", 1));
 
           assertFacetResult(
               facets.getAllChildren("c", "buzz", "bif"),
@@ -407,7 +396,7 @@
               new String[] {"buzz", "bif"},
               1,
               2,
-              new LabelAndValue[] {new LabelAndValue("baf", 2)});
+              new LabelAndValue("baf", 2));
 
           // test getSpecificValue (and make sure hierarchical dims are supported: LUCENE-10584):
           assertEquals(2, facets.getSpecificValue("c", "buzz"));
@@ -473,22 +462,11 @@
 
         // test getAllChildren
         assertFacetResult(
-            facets.getAllChildren("a"),
-            "a",
-            new String[0],
-            1,
-            1,
-            new LabelAndValue[] {
-              new LabelAndValue("bar", 1),
-            });
+            facets.getAllChildren("a"), "a", new String[0], 1, 1, new LabelAndValue("bar", 1));
 
         // test topNChildren = 0
         Facets finalFacets = facets;
-        expectThrows(
-            IllegalArgumentException.class,
-            () -> {
-              finalFacets.getTopChildren(0, "a");
-            });
+        expectThrows(IllegalArgumentException.class, () -> finalFacets.getTopChildren(0, "a"));
 
         ExecutorService exec =
             new ThreadPoolExecutor(
@@ -496,7 +474,7 @@
                 TestUtil.nextInt(random(), 2, 6),
                 Long.MAX_VALUE,
                 TimeUnit.MILLISECONDS,
-                new LinkedBlockingQueue<Runnable>(),
+                new LinkedBlockingQueue<>(),
                 new NamedThreadFactory("TestIndexSearcher"));
         try {
           facets = new ConcurrentSortedSetDocValuesFacetCounts(state, exec);
@@ -579,9 +557,9 @@
             new String[0],
             3,
             3,
-            new LabelAndValue[] {
-              new LabelAndValue("bar", 1), new LabelAndValue("baz", 1), new LabelAndValue("buz", 1),
-            });
+            new LabelAndValue("bar", 1),
+            new LabelAndValue("baz", 1),
+            new LabelAndValue("buz", 1));
 
         assertFacetResult(
             facets.getAllChildren("b"),
@@ -589,9 +567,8 @@
             new String[0],
             2,
             3,
-            new LabelAndValue[] {
-              new LabelAndValue("bar", 2), new LabelAndValue("buzz", 1),
-            });
+            new LabelAndValue("bar", 2),
+            new LabelAndValue("buzz", 1));
 
         ExecutorService exec =
             new ThreadPoolExecutor(
@@ -599,7 +576,7 @@
                 TestUtil.nextInt(random(), 2, 6),
                 Long.MAX_VALUE,
                 TimeUnit.MILLISECONDS,
-                new LinkedBlockingQueue<Runnable>(),
+                new LinkedBlockingQueue<>(),
                 new NamedThreadFactory("TestIndexSearcher"));
         try {
           facets = new ConcurrentSortedSetDocValuesFacetCounts(state, exec);
@@ -673,9 +650,8 @@
               new String[0],
               2,
               3,
-              new LabelAndValue[] {
-                new LabelAndValue("baz", 1), new LabelAndValue("foo", 2),
-              });
+              new LabelAndValue("baz", 1),
+              new LabelAndValue("foo", 2));
 
           // DrillDown:
           DrillDownQuery q = new DrillDownQuery(config);
@@ -998,10 +974,7 @@
               searcher.search(new MatchAllDocsQuery(), new FacetsCollectorManager());
 
           expectThrows(
-              IllegalStateException.class,
-              () -> {
-                new SortedSetDocValuesFacetCounts(state, c);
-              });
+              IllegalStateException.class, () -> new SortedSetDocValuesFacetCounts(state, c));
         }
       }
     }
@@ -1108,11 +1081,9 @@
               new String[0],
               3,
               3,
-              new LabelAndValue[] {
-                new LabelAndValue("foo1", 1),
-                new LabelAndValue("foo2", 1),
-                new LabelAndValue("foo3", 1),
-              });
+              new LabelAndValue("foo1", 1),
+              new LabelAndValue("foo2", 1),
+              new LabelAndValue("foo3", 1));
 
           assertFacetResult(
               facets.getAllChildren("b"),
@@ -1120,17 +1091,11 @@
               new String[0],
               2,
               2,
-              new LabelAndValue[] {
-                new LabelAndValue("bar1", 1), new LabelAndValue("bar2", 1),
-              });
+              new LabelAndValue("bar1", 1),
+              new LabelAndValue("bar2", 1));
 
           assertFacetResult(
-              facets.getAllChildren("c"),
-              "c",
-              new String[0],
-              1,
-              1,
-              new LabelAndValue[] {new LabelAndValue("baz1", 1)});
+              facets.getAllChildren("c"), "c", new String[0], 1, 1, new LabelAndValue("baz1", 1));
 
           assertFacetResult(
               facets.getAllChildren("d"),
@@ -1138,7 +1103,8 @@
               new String[0],
               2,
               2,
-              new LabelAndValue[] {new LabelAndValue("biz1", 1), new LabelAndValue("biz2", 1)});
+              new LabelAndValue("biz1", 1),
+              new LabelAndValue("biz2", 1));
 
           Collection<Accountable> resources = state.getChildResources();
           assertTrue(state.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME));
@@ -1217,14 +1183,10 @@
               new String[] {"foo"},
               2,
               2,
-              new LabelAndValue[] {new LabelAndValue("bar", 1), new LabelAndValue("baz", 1)});
+              new LabelAndValue("bar", 1),
+              new LabelAndValue("baz", 1));
           assertFacetResult(
-              facets.getAllChildren("d"),
-              "d",
-              new String[0],
-              1,
-              2,
-              new LabelAndValue[] {new LabelAndValue("foo", 2)});
+              facets.getAllChildren("d"), "d", new String[0], 1, 2, new LabelAndValue("foo", 2));
 
           Collection<Accountable> resources = state.getChildResources();
           assertTrue(state.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME));
@@ -1285,7 +1247,8 @@
               new String[0],
               2,
               2,
-              new LabelAndValue[] {new LabelAndValue("foo1", 1), new LabelAndValue("foo2", 1)});
+              new LabelAndValue("foo1", 1),
+              new LabelAndValue("foo2", 1));
         } finally {
           if (exec != null) exec.shutdownNow();
         }
@@ -1346,7 +1309,8 @@
               new String[0],
               2,
               2,
-              new LabelAndValue[] {new LabelAndValue("boo", 1), new LabelAndValue("foo", 2)});
+              new LabelAndValue("boo", 1),
+              new LabelAndValue("foo", 2));
         } finally {
           if (exec != null) exec.shutdownNow();
         }
@@ -1411,20 +1375,13 @@
               // Slow, yet hopefully bug-free, faceting:
               @SuppressWarnings({"rawtypes", "unchecked"})
               Map<String, Integer>[] expectedCounts = new HashMap[numDims];
-              for (int i = 0; i < numDims; i++) {
-                expectedCounts[i] = new HashMap<>();
-              }
+              Arrays.setAll(expectedCounts, i -> new HashMap<>());
 
               for (TestDoc doc : testDocs) {
                 if (doc.content.equals(searchToken)) {
                   for (int j = 0; j < numDims; j++) {
                     if (doc.dims[j] != null) {
-                      Integer v = expectedCounts[j].get(doc.dims[j]);
-                      if (v == null) {
-                        expectedCounts[j].put(doc.dims[j], 1);
-                      } else {
-                        expectedCounts[j].put(doc.dims[j], v.intValue() + 1);
-                      }
+                      expectedCounts[j].merge(doc.dims[j], 1, Integer::sum);
                     }
                   }
                 }
@@ -1445,12 +1402,12 @@
                           "dim" + i,
                           new String[0],
                           totCount,
-                          labelValues.toArray(new LabelAndValue[labelValues.size()]),
+                          labelValues.toArray(new LabelAndValue[0]),
                           labelValues.size()));
                 }
               }
 
-              // Sort by highest value, tie break by value:
+              // Sort by highest value, tie-break by value:
               sortFacetResults(expected);
 
               List<FacetResult> actual = facets.getAllDims(10);
@@ -1603,18 +1560,15 @@
                     } else {
                       newLabelAndValues = labelAndValues;
                     }
-                    newLabelAndValues =
-                        Arrays.stream(newLabelAndValues)
-                            .sorted(
-                                (o1, o2) -> {
-                                  if (o1.value.equals(o2.value)) {
-                                    return new BytesRef(o1.label).compareTo(new BytesRef(o2.label));
-                                  } else {
-                                    return o2.value.intValue() - o1.value.intValue();
-                                  }
-                                })
-                            .collect(Collectors.toList())
-                            .toArray(LabelAndValue[]::new);
+                    Arrays.sort(
+                        newLabelAndValues,
+                        (o1, o2) -> {
+                          if (o1.value.equals(o2.value)) {
+                            return new BytesRef(o1.label).compareTo(new BytesRef(o2.label));
+                          } else {
+                            return o2.value.intValue() - o1.value.intValue();
+                          }
+                        });
                     FacetResult newResult =
                         new FacetResult(result.dim, result.path, 0, newLabelAndValues, childCount);
                     expectedResults.put(parentDimPathString, newResult);
@@ -1842,9 +1796,7 @@
 
           expectThrows(
               IllegalArgumentException.class,
-              () -> {
-                facets.getTopChildren(5, "non-existent dimension", "with a path");
-              });
+              () -> facets.getTopChildren(5, "non-existent dimension", "with a path"));
         } finally {
           if (exec != null) exec.shutdownNow();
         }
@@ -1911,7 +1863,7 @@
           TestUtil.nextInt(random(), 2, 6),
           Long.MAX_VALUE,
           TimeUnit.MILLISECONDS,
-          new LinkedBlockingQueue<Runnable>(),
+          new LinkedBlockingQueue<>(),
           new NamedThreadFactory("TestIndexSearcher"));
     }
   }
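
`Arrays.setAll` replaces the index-based loop that filled `expectedCounts`; it applies the generator function to each index in turn. A minimal sketch mirroring the unchecked-array shape from the test:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    public class SetAllDemo {
      @SuppressWarnings({"rawtypes", "unchecked"})
      public static void main(String[] args) {
        Map<String, Integer>[] expectedCounts = new HashMap[3];
        // Fill every slot with a fresh map, one generator call per index.
        Arrays.setAll(expectedCounts, i -> new HashMap<>());
        expectedCounts[1].merge("dim", 1, Integer::sum);
        System.out.println(expectedCounts[1]); // {dim=1}
      }
    }
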
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java
index ef1f038..0f38d65 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java
@@ -29,7 +29,6 @@
 import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -336,164 +335,157 @@
     for (String field : List.of(FLD_TEXT1, FLD_TEXT2)) {
       String inputDocument = "The quick brown fox jumps over the lazy dog";
 
-      List<String[]> queryResultPairs =
-          Arrays.asList(
-              new String[][] {
-                {"fn:ordered(brown dog)", "0. %s: The quick >brown fox jumps over the lazy dog<"},
-                {
-                  "fn:within(fn:or(lazy quick) 1 fn:or(dog fox))",
-                  "0. %s: The quick brown fox jumps over the >lazy< dog"
-                },
-                {
-                  "fn:containedBy(fox fn:ordered(brown fox dog))",
-                  "0. %s: The quick brown >fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:atLeast(2 quick fox \"furry dog\")",
-                  "0. %s: The >quick brown fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:maxgaps(0 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
-                  "0. %s: The quick brown fox jumps over the >lazy dog<"
-                },
-                {
-                  "fn:maxgaps(1 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
-                  "0. %s: The >quick brown fox< jumps over the >lazy dog<"
-                },
-                {
-                  "fn:maxwidth(2 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
-                  "0. %s: The quick brown fox jumps over the >lazy dog<"
-                },
-                {
-                  "fn:maxwidth(3 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
-                  "0. %s: The >quick brown fox< jumps over the >lazy dog<"
-                },
-                {"fn:or(quick \"fox\")", "0. %s: The >quick< brown >fox< jumps over the lazy dog"},
-                {"fn:or(\"quick fox\")"},
-                {
-                  "fn:phrase(quick brown fox)",
-                  "0. %s: The >quick brown fox< jumps over the lazy dog"
-                },
-                {"fn:wildcard(jump*)", "0. %s: The quick brown fox >jumps< over the lazy dog"},
-                {"fn:wildcard(br*n)", "0. %s: The quick >brown< fox jumps over the lazy dog"},
-                {"fn:fuzzyTerm(fxo)", "0. %s: The quick brown >fox< jumps over the lazy dog"},
-                {"fn:or(dog fox)", "0. %s: The quick brown >fox< jumps over the lazy >dog<"},
-                {
-                  "fn:phrase(fn:ordered(quick fox) jumps)",
-                  "0. %s: The >quick brown fox jumps< over the lazy dog"
-                },
-                {
-                  "fn:ordered(quick jumps dog)",
-                  "0. %s: The >quick brown fox jumps over the lazy dog<"
-                },
-                {
-                  "fn:ordered(quick fn:or(fox dog))",
-                  "0. %s: The >quick brown fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:ordered(quick jumps fn:or(fox dog))",
-                  "0. %s: The >quick brown fox jumps over the lazy dog<"
-                },
-                {
-                  "fn:unordered(dog jumps quick)",
-                  "0. %s: The >quick brown fox jumps over the lazy dog<"
-                },
-                {
-                  "fn:unordered(fn:or(fox dog) quick)",
-                  "0. %s: The >quick brown fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:unordered(fn:phrase(brown fox) fn:phrase(fox jumps))",
-                  "0. %s: The quick >brown fox jumps< over the lazy dog"
-                },
-                {"fn:ordered(fn:phrase(brown fox) fn:phrase(fox jumps))"},
-                {"fn:unorderedNoOverlaps(fn:phrase(brown fox) fn:phrase(fox jumps))"},
-                {
-                  "fn:before(fn:or(brown lazy) fox)",
-                  "0. %s: The quick >brown< fox jumps over the lazy dog"
-                },
-                {
-                  "fn:before(fn:or(brown lazy) fn:or(dog fox))",
-                  "0. %s: The quick >brown< fox jumps over the >lazy< dog"
-                },
-                {
-                  "fn:after(fn:or(brown lazy) fox)",
-                  "0. %s: The quick brown fox jumps over the >lazy< dog"
-                },
-                {
-                  "fn:after(fn:or(brown lazy) fn:or(dog fox))",
-                  "0. %s: The quick brown fox jumps over the >lazy< dog"
-                },
-                {
-                  "fn:within(fn:or(fox dog) 1 fn:or(quick lazy))",
-                  "0. %s: The quick brown fox jumps over the lazy >dog<"
-                },
-                {
-                  "fn:within(fn:or(fox dog) 2 fn:or(quick lazy))",
-                  "0. %s: The quick brown >fox< jumps over the lazy >dog<"
-                },
-                {
-                  "fn:notWithin(fn:or(fox dog) 1 fn:or(quick lazy))",
-                  "0. %s: The quick brown >fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:containedBy(fn:or(fox dog) fn:ordered(quick lazy))",
-                  "0. %s: The quick brown >fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:notContainedBy(fn:or(fox dog) fn:ordered(quick lazy))",
-                  "0. %s: The quick brown fox jumps over the lazy >dog<"
-                },
-                {
-                  "fn:containing(fn:atLeast(2 quick fox dog) jumps)",
-                  "0. %s: The quick brown >fox jumps over the lazy dog<"
-                },
-                {
-                  "fn:notContaining(fn:ordered(fn:or(the The) fn:or(fox dog)) brown)",
-                  "0. %s: The quick brown fox jumps over >the lazy dog<"
-                },
-                {
-                  "fn:overlapping(fn:phrase(brown fox) fn:phrase(fox jumps))",
-                  "0. %s: The quick >brown fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:overlapping(fn:or(fox dog) fn:extend(lazy 2 2))",
-                  "0. %s: The quick brown fox jumps over the lazy >dog<"
-                },
-                {
-                  "fn:nonOverlapping(fn:phrase(brown fox) fn:phrase(lazy dog))",
-                  "0. %s: The quick >brown fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:nonOverlapping(fn:or(fox dog) fn:extend(lazy 2 2))",
-                  "0. %s: The quick brown >fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:atLeast(2 fn:unordered(furry dog) fn:unordered(brown dog) lazy quick)",
-                  "0. %s: The >quick >brown fox jumps over the lazy<<> dog<"
-                },
-                {"fn:extend(fox 1 2)", "0. %s: The quick >brown fox jumps over< the lazy dog"},
-                {
-                  "fn:extend(fn:or(dog fox) 2 0)",
-                  "0. %s: The >quick brown fox< jumps over >the lazy dog<"
-                },
-                {
-                  "fn:containedBy(fn:or(fox dog) fn:extend(lazy 3 3))",
-                  "0. %s: The quick brown fox jumps over the lazy >dog<"
-                },
-                {
-                  "fn:notContainedBy(fn:or(fox dog) fn:extend(lazy 3 3))",
-                  "0. %s: The quick brown >fox< jumps over the lazy dog"
-                },
-                {
-                  "fn:containing(fn:extend(fn:or(lazy brown) 1 1) fn:or(fox dog))",
-                  "0. %s: The >quick brown fox< jumps over >the lazy dog<"
-                },
-                {
-                  "fn:notContaining(fn:extend(fn:or(fox dog) 1 0) fn:or(brown yellow))",
-                  "0. %s: The quick brown fox jumps over the >lazy dog<"
-                }
-              });
+      String[][] queryResultPairs =
+          new String[][] {
+            {"fn:ordered(brown dog)", "0. %s: The quick >brown fox jumps over the lazy dog<"},
+            {
+              "fn:within(fn:or(lazy quick) 1 fn:or(dog fox))",
+              "0. %s: The quick brown fox jumps over the >lazy< dog"
+            },
+            {
+              "fn:containedBy(fox fn:ordered(brown fox dog))",
+              "0. %s: The quick brown >fox< jumps over the lazy dog"
+            },
+            {
+              "fn:atLeast(2 quick fox \"furry dog\")",
+              "0. %s: The >quick brown fox< jumps over the lazy dog"
+            },
+            {
+              "fn:maxgaps(0 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
+              "0. %s: The quick brown fox jumps over the >lazy dog<"
+            },
+            {
+              "fn:maxgaps(1 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
+              "0. %s: The >quick brown fox< jumps over the >lazy dog<"
+            },
+            {
+              "fn:maxwidth(2 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
+              "0. %s: The quick brown fox jumps over the >lazy dog<"
+            },
+            {
+              "fn:maxwidth(3 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))",
+              "0. %s: The >quick brown fox< jumps over the >lazy dog<"
+            },
+            {"fn:or(quick \"fox\")", "0. %s: The >quick< brown >fox< jumps over the lazy dog"},
+            {"fn:or(\"quick fox\")"},
+            {"fn:phrase(quick brown fox)", "0. %s: The >quick brown fox< jumps over the lazy dog"},
+            {"fn:wildcard(jump*)", "0. %s: The quick brown fox >jumps< over the lazy dog"},
+            {"fn:wildcard(br*n)", "0. %s: The quick >brown< fox jumps over the lazy dog"},
+            {"fn:fuzzyTerm(fxo)", "0. %s: The quick brown >fox< jumps over the lazy dog"},
+            {"fn:or(dog fox)", "0. %s: The quick brown >fox< jumps over the lazy >dog<"},
+            {
+              "fn:phrase(fn:ordered(quick fox) jumps)",
+              "0. %s: The >quick brown fox jumps< over the lazy dog"
+            },
+            {"fn:ordered(quick jumps dog)", "0. %s: The >quick brown fox jumps over the lazy dog<"},
+            {
+              "fn:ordered(quick fn:or(fox dog))",
+              "0. %s: The >quick brown fox< jumps over the lazy dog"
+            },
+            {
+              "fn:ordered(quick jumps fn:or(fox dog))",
+              "0. %s: The >quick brown fox jumps over the lazy dog<"
+            },
+            {
+              "fn:unordered(dog jumps quick)",
+              "0. %s: The >quick brown fox jumps over the lazy dog<"
+            },
+            {
+              "fn:unordered(fn:or(fox dog) quick)",
+              "0. %s: The >quick brown fox< jumps over the lazy dog"
+            },
+            {
+              "fn:unordered(fn:phrase(brown fox) fn:phrase(fox jumps))",
+              "0. %s: The quick >brown fox jumps< over the lazy dog"
+            },
+            {"fn:ordered(fn:phrase(brown fox) fn:phrase(fox jumps))"},
+            {"fn:unorderedNoOverlaps(fn:phrase(brown fox) fn:phrase(fox jumps))"},
+            {
+              "fn:before(fn:or(brown lazy) fox)",
+              "0. %s: The quick >brown< fox jumps over the lazy dog"
+            },
+            {
+              "fn:before(fn:or(brown lazy) fn:or(dog fox))",
+              "0. %s: The quick >brown< fox jumps over the >lazy< dog"
+            },
+            {
+              "fn:after(fn:or(brown lazy) fox)",
+              "0. %s: The quick brown fox jumps over the >lazy< dog"
+            },
+            {
+              "fn:after(fn:or(brown lazy) fn:or(dog fox))",
+              "0. %s: The quick brown fox jumps over the >lazy< dog"
+            },
+            {
+              "fn:within(fn:or(fox dog) 1 fn:or(quick lazy))",
+              "0. %s: The quick brown fox jumps over the lazy >dog<"
+            },
+            {
+              "fn:within(fn:or(fox dog) 2 fn:or(quick lazy))",
+              "0. %s: The quick brown >fox< jumps over the lazy >dog<"
+            },
+            {
+              "fn:notWithin(fn:or(fox dog) 1 fn:or(quick lazy))",
+              "0. %s: The quick brown >fox< jumps over the lazy dog"
+            },
+            {
+              "fn:containedBy(fn:or(fox dog) fn:ordered(quick lazy))",
+              "0. %s: The quick brown >fox< jumps over the lazy dog"
+            },
+            {
+              "fn:notContainedBy(fn:or(fox dog) fn:ordered(quick lazy))",
+              "0. %s: The quick brown fox jumps over the lazy >dog<"
+            },
+            {
+              "fn:containing(fn:atLeast(2 quick fox dog) jumps)",
+              "0. %s: The quick brown >fox jumps over the lazy dog<"
+            },
+            {
+              "fn:notContaining(fn:ordered(fn:or(the The) fn:or(fox dog)) brown)",
+              "0. %s: The quick brown fox jumps over >the lazy dog<"
+            },
+            {
+              "fn:overlapping(fn:phrase(brown fox) fn:phrase(fox jumps))",
+              "0. %s: The quick >brown fox< jumps over the lazy dog"
+            },
+            {
+              "fn:overlapping(fn:or(fox dog) fn:extend(lazy 2 2))",
+              "0. %s: The quick brown fox jumps over the lazy >dog<"
+            },
+            {
+              "fn:nonOverlapping(fn:phrase(brown fox) fn:phrase(lazy dog))",
+              "0. %s: The quick >brown fox< jumps over the lazy dog"
+            },
+            {
+              "fn:nonOverlapping(fn:or(fox dog) fn:extend(lazy 2 2))",
+              "0. %s: The quick brown >fox< jumps over the lazy dog"
+            },
+            {
+              "fn:atLeast(2 fn:unordered(furry dog) fn:unordered(brown dog) lazy quick)",
+              "0. %s: The >quick >brown fox jumps over the lazy<<> dog<"
+            },
+            {"fn:extend(fox 1 2)", "0. %s: The quick >brown fox jumps over< the lazy dog"},
+            {
+              "fn:extend(fn:or(dog fox) 2 0)",
+              "0. %s: The >quick brown fox< jumps over >the lazy dog<"
+            },
+            {
+              "fn:containedBy(fn:or(fox dog) fn:extend(lazy 3 3))",
+              "0. %s: The quick brown fox jumps over the lazy >dog<"
+            },
+            {
+              "fn:notContainedBy(fn:or(fox dog) fn:extend(lazy 3 3))",
+              "0. %s: The quick brown >fox< jumps over the lazy dog"
+            },
+            {
+              "fn:containing(fn:extend(fn:or(lazy brown) 1 1) fn:or(fox dog))",
+              "0. %s: The >quick brown fox< jumps over >the lazy dog<"
+            },
+            {
+              "fn:notContaining(fn:extend(fn:or(fox dog) 1 0) fn:or(brown yellow))",
+              "0. %s: The quick brown fox jumps over the >lazy dog<"
+            }
+          };
 
       // Verify assertions.
       new IndexBuilder(this::toField)
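
Editor's note: the `Arrays.asList(new String[][] {...})` wrapper above was dropped because a 2-D array literal can be iterated directly; the extra List layer added nothing. A hypothetical sketch of the simplified shape (names illustrative only):

    public class ArrayLoopDemo {
      public static void main(String[] args) {
        // Rows with a single element represent queries expected to produce no hits:
        String[][] pairs = {
          {"fn:ordered(brown dog)", "expected highlight"},
          {"fn:or(\"quick fox\")"}
        };
        for (String[] pair : pairs) {
          System.out.println(pair[0] + (pair.length > 1 ? " -> " + pair[1] : ""));
        }
      }
    }
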
@@ -789,9 +781,8 @@
       }
     }
 
-    var expectedTrimmed =
-        Stream.of(expectedFormattedLines).map(String::trim).collect(Collectors.toList());
-    var actualTrimmed = actualLines.stream().map(String::trim).collect(Collectors.toList());
+    var expectedTrimmed = Stream.of(expectedFormattedLines).map(String::trim).toList();
+    var actualTrimmed = actualLines.stream().map(String::trim).toList();
     if (!Objects.equals(expectedTrimmed, actualTrimmed)) {
       throw new AssertionError(
           "Actual hits were:\n"
@@ -807,8 +798,8 @@
             docHighlights ->
                 docHighlights.fields.entrySet().stream()
                     .map(e -> e.getKey() + ": " + String.join(", ", e.getValue()))
-                    .collect(Collectors.toList()))
-        .collect(Collectors.toList());
+                    .toList())
+        .toList();
   }
 
   private IndexableField toField(String name, String value) {
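
Editor's note on the recurring `.collect(Collectors.toList())` -> `.toList()` substitution in this patch: the two are not strictly interchangeable, because `Stream.toList()` (Java 16+) returns an unmodifiable list. A minimal sketch (class name `ToListDemo` is illustrative only):

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class ToListDemo {
      public static void main(String[] args) {
        // Collectors.toList() makes no immutability guarantee; in practice
        // it returns a plain ArrayList that callers may mutate:
        List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
        mutable.add("c");

        // Stream.toList() (Java 16+) returns an unmodifiable list:
        List<String> fixed = Stream.of("a", "b").toList();
        // fixed.add("c"); // would throw UnsupportedOperationException

        System.out.println(mutable + " " + fixed); // [a, b, c] [a, b]
      }
    }

The substitution is therefore only safe where the resulting list is never mutated afterwards, which is the premise behind every call site touched here.
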
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java
index 23e2388..b001545 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java
@@ -35,7 +35,6 @@
 import java.util.Locale;
 import java.util.Objects;
 import java.util.Set;
-import java.util.stream.Collectors;
 import javax.swing.BorderFactory;
 import javax.swing.JButton;
 import javax.swing.JCheckBox;
@@ -140,7 +139,7 @@
 
   private final JButton searchBtn = new JButton();
 
-  private JCheckBox exactHitsCntCB = new JCheckBox();
+  private final JCheckBox exactHitsCntCB = new JCheckBox();
 
   private final JButton mltBtn = new JButton();
 
@@ -811,16 +810,10 @@
               });
       operatorRegistry
           .get(FieldValuesTabOperator.class)
-          .ifPresent(
-              operator -> {
-                operator.setFields(searchModel.getFieldNames());
-              });
+          .ifPresent(operator -> operator.setFields(searchModel.getFieldNames()));
       operatorRegistry
           .get(MLTTabOperator.class)
-          .ifPresent(
-              operator -> {
-                operator.setFields(searchModel.getFieldNames());
-              });
+          .ifPresent(operator -> operator.setFields(searchModel.getFieldNames()));
 
       queryStringTA.setText("*:*");
       parsedQueryTA.setText("");
@@ -864,7 +857,7 @@
     VALUES(4),
     MLT(5);
 
-    private int tabIdx;
+    private final int tabIdx;
 
     Tab(int tabIdx) {
       this.tabIdx = tabIdx;
@@ -937,7 +930,7 @@
                       String v = String.join(",", Arrays.asList(e.getValue()));
                       return e.getKey() + "=" + v + ";";
                     })
-                .collect(Collectors.toList());
+                .toList();
         data[i][Column.VALUE.getIndex()] = String.join(" ", concatValues);
       }
     }
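
Editor's note: the `ifPresent` cleanups above follow the usual rule that a single-expression lambda needs neither braces nor a statement block. A small illustrative sketch (names are hypothetical):

    import java.util.Optional;

    public class IfPresentDemo {
      public static void main(String[] args) {
        Optional<String> field = Optional.of("title");
        // A one-statement body needs no braces:
        field.ifPresent(f -> System.out.println("field = " + f));
        // Pure delegation can shrink further to a method reference:
        field.ifPresent(System.out::println);
      }
    }
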
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
index 46399d0..0f13514 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java
@@ -24,7 +24,6 @@
 import java.awt.Window;
 import java.io.IOException;
 import java.util.List;
-import java.util.stream.Collectors;
 import javax.swing.BorderFactory;
 import javax.swing.JButton;
 import javax.swing.JDialog;
@@ -103,7 +102,7 @@
                 att ->
                     att.getAttValues().entrySet().stream()
                         .map(e -> TokenAttValue.of(att.getAttClass(), e.getKey(), e.getValue())))
-            .collect(Collectors.toList());
+            .toList();
     TableUtils.setupTable(
         attributesTable,
         ListSelectionModel.SINGLE_SELECTION,
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
index 723cd21..b49ef94 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java
@@ -35,7 +35,6 @@
 import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import javax.swing.BorderFactory;
 import javax.swing.BoxLayout;
@@ -152,9 +151,7 @@
     this.indexOptionsDialogFactory = IndexOptionsDialogFactory.getInstance();
     this.helpDialogFactory = HelpDialogFactory.getInstance();
     this.newFieldList =
-        IntStream.range(0, ROW_COUNT)
-            .mapToObj(i -> NewField.newInstance())
-            .collect(Collectors.toList());
+        IntStream.range(0, ROW_COUNT).mapToObj(i -> NewField.newInstance()).toList();
 
     operatorRegistry.register(AddDocumentDialogOperator.class, this);
     indexHandler.addObserver(new Observer());
@@ -388,7 +385,7 @@
               .filter(nf -> !nf.isDeleted())
               .filter(nf -> !StringUtils.isNullOrEmpty(nf.getName()))
               .filter(nf -> !StringUtils.isNullOrEmpty(nf.getValue()))
-              .collect(Collectors.toList());
+              .toList();
       if (validFields.isEmpty()) {
         infoTA.setText("Please add one or more fields. Name and Value are both required.");
         return;
@@ -411,7 +408,6 @@
       log.info("Added document: " + doc);
     }
 
-    @SuppressWarnings("unchecked")
     private IndexableField toIndexableField(NewField nf) throws Exception {
       final Constructor<? extends IndexableField> constr;
       if (nf.getType().equals(TextField.class) || nf.getType().equals(StringField.class)) {
@@ -505,9 +501,9 @@
       OPTIONS("Options", 3, String.class),
       VALUE("Value", 4, String.class);
 
-      private String colName;
-      private int index;
-      private Class<?> type;
+      private final String colName;
+      private final int index;
+      private final Class<?> type;
 
       Column(String colName, int index, Class<?> type) {
         this.colName = colName;
@@ -589,7 +585,7 @@
 
   static final class OptionsCellRenderer implements TableCellRenderer {
 
-    private JDialog dialog;
+    private final JDialog dialog;
 
     private final IndexOptionsDialogFactory indexOptionsDialogFactory;
 
@@ -609,7 +605,6 @@
     }
 
     @Override
-    @SuppressWarnings("unchecked")
     public Component getTableCellRendererComponent(
         JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) {
       if (table != null && this.table != table) {
@@ -635,9 +630,7 @@
                             title,
                             500,
                             500,
-                            (factory) -> {
-                              factory.setNewField(newFieldList.get(row));
-                            });
+                            (factory) -> factory.setNewField(newFieldList.get(row)));
                   }
                 }
               });
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java
index 8095c2c..3530162 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java
@@ -133,9 +133,9 @@
       POSITIONS("Positions", 2, String.class),
       OFFSETS("Offsets", 3, String.class);
 
-      private String colName;
-      private int index;
-      private Class<?> type;
+      private final String colName;
+      private final int index;
+      private final Class<?> type;
 
       Column(String colName, int index, Class<?> type) {
         this.colName = colName;
@@ -172,23 +172,14 @@
         String termText = entry.getTermText();
         long freq = tvEntries.get(i).getFreq();
         String positions =
-            String.join(
-                ",",
-                entry.getPositions().stream()
-                    .map(pos -> Integer.toString(pos.getPosition()))
-                    .collect(Collectors.toList()));
+            entry.getPositions().stream()
+                .map(pos -> Integer.toString(pos.getPosition()))
+                .collect(Collectors.joining(","));
         String offsets =
-            String.join(
-                ",",
-                entry.getPositions().stream()
-                    .filter(
-                        pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent())
-                    .map(
-                        pos ->
-                            Integer.toString(pos.getStartOffset().orElse(-1))
-                                + "-"
-                                + Integer.toString(pos.getEndOffset().orElse(-1)))
-                    .collect(Collectors.toList()));
+            entry.getPositions().stream()
+                .filter(pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent())
+                .map(pos -> pos.getStartOffset().orElse(-1) + "-" + pos.getEndOffset().orElse(-1))
+                .collect(Collectors.joining(","));
 
         data[i] = new Object[] {termText, freq, positions, offsets};
       }
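
Editor's note: replacing `String.join(",", stream...collect(toList()))` with `Collectors.joining(",")`, as above, concatenates in a single pass without materializing an intermediate list. A minimal sketch (`JoiningDemo` is illustrative only):

    import java.util.List;
    import java.util.stream.Collectors;

    public class JoiningDemo {
      public static void main(String[] args) {
        List<Integer> positions = List.of(3, 7, 12);
        // One pass, no intermediate List<String>:
        String joined = positions.stream()
            .map(String::valueOf)
            .collect(Collectors.joining(","));
        System.out.println(joined); // 3,7,12
      }
    }
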
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
index 54b6b69..7df0869 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java
@@ -39,7 +39,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import javax.swing.BorderFactory;
 import javax.swing.DefaultComboBoxModel;
@@ -164,7 +163,7 @@
     panel.add(confDirBtn);
     buildBtn.setText(
         FontUtils.elegantIconHtml(
-            "&#xe102;", MessageUtils.getLocalizedMessage("analysis.button.build_analyzser")));
+            "&#xe102;", MessageUtils.getLocalizedMessage("analysis.button.build_analyzer")));
     buildBtn.setFont(StyleConstants.FONT_BUTTON_LARGE);
     buildBtn.setMargin(new Insets(3, 3, 3, 3));
     buildBtn.addActionListener(listeners::buildAnalyzer);
@@ -464,11 +463,10 @@
     int ret = fileChooser.showOpenDialog(containerPanel);
     if (ret == JFileChooser.APPROVE_OPTION) {
       File[] files = fileChooser.getSelectedFiles();
-      analysisModel.addExternalJars(
-          Arrays.stream(files).map(File::getAbsolutePath).collect(Collectors.toList()));
+      analysisModel.addExternalJars(Arrays.stream(files).map(File::getAbsolutePath).toList());
       operatorRegistry
           .get(CustomAnalyzerPanelOperator.class)
-          .ifPresent(operator -> operator.resetAnalysisComponents());
+          .ifPresent(CustomAnalyzerPanelOperator::resetAnalysisComponents);
       messageBroker.showStatusMessage("External jars were added.");
     }
   }
@@ -569,8 +567,7 @@
         selectedItem,
         tfParamsList.get(tfParamsList.size() - 1),
         () -> {
-          selectedTfList.setModel(
-              new DefaultComboBoxModel<>(updatedList.toArray(new String[updatedList.size()])));
+          selectedTfList.setModel(new DefaultComboBoxModel<>(updatedList.toArray(new String[0])));
           tfFactoryCombo.setSelectedItem("");
           tfEditBtn.setEnabled(true);
           buildBtn.setEnabled(true);
@@ -617,9 +614,7 @@
         -1,
         selectedItem,
         tokParams,
-        () -> {
-          buildBtn.setEnabled(true);
-        });
+        () -> buildBtn.setEnabled(true));
   }
 
   private void editTokenFilters() {
@@ -704,7 +699,7 @@
         IntStream.range(0, cfParamsList.size())
             .filter(i -> !deletedIndexes.contains(i))
             .mapToObj(cfParamsList::get)
-            .collect(Collectors.toList());
+            .toList();
     cfParamsList.clear();
     cfParamsList.addAll(updatedParamList);
     assert selectedCfList.getModel().getSize() == cfParamsList.size();
@@ -725,7 +720,7 @@
         IntStream.range(0, tfParamsList.size())
             .filter(i -> !deletedIndexes.contains(i))
             .mapToObj(tfParamsList::get)
-            .collect(Collectors.toList());
+            .toList();
     tfParamsList.clear();
     tfParamsList.addAll(updatedParamList);
     assert selectedTfList.getModel().getSize() == tfParamsList.size();
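
Editor's note: `toArray(new String[0])`, used above instead of a pre-sized array, is the generally recommended form on modern JVMs: the zero-length variant is typically as fast or faster, and it sidesteps sizing races on concurrent collections. A short sketch:

    import java.util.List;

    public class ToArrayDemo {
      public static void main(String[] args) {
        List<String> items = List.of("a", "b", "c");
        String[] a = items.toArray(new String[0]);
        // Java 11+ equivalent taking a generator function:
        String[] b = items.toArray(String[]::new);
        System.out.println(a.length + " " + b.length); // 3 3
      }
    }
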
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
index 63e3daa..36528a1 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java
@@ -22,7 +22,6 @@
 import java.awt.event.MouseAdapter;
 import java.awt.event.MouseEvent;
 import java.util.List;
-import java.util.stream.Collectors;
 import javax.swing.JLabel;
 import javax.swing.JPanel;
 import javax.swing.JScrollPane;
@@ -198,7 +197,7 @@
                     att ->
                         att.getAttValues().entrySet().stream()
                             .map(e -> e.getKey() + "=" + e.getValue()))
-                .collect(Collectors.toList());
+                .toList();
         data[i][Column.ATTR.getIndex()] = String.join(",", attValues);
       }
     }
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
index fc69261..7e4261c 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java
@@ -19,7 +19,6 @@
 
 import java.util.List;
 import java.util.function.IntFunction;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import javax.swing.JList;
 import javax.swing.ListModel;
@@ -34,7 +33,7 @@
 
   public static <T, R> List<R> getAllItems(JList<T> jlist, IntFunction<R> mapFunc) {
     ListModel<T> model = jlist.getModel();
-    return IntStream.range(0, model.getSize()).mapToObj(mapFunc).collect(Collectors.toList());
+    return IntStream.range(0, model.getSize()).mapToObj(mapFunc).toList();
   }
 
   private ListUtils() {}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java
index a14cfb5..9ffc84a 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java
@@ -75,8 +75,8 @@
   }
 
   public static <T extends TableColumnInfo> String[] columnNames(T[] columns) {
-    return columnMap(columns).entrySet().stream()
-        .map(e -> e.getValue().getColName())
+    return columnMap(columns).values().stream()
+        .map(TableColumnInfo::getColName)
         .toArray(String[]::new);
   }
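
Editor's note: streaming `values()` directly, as in the TableUtils change above, avoids unwrapping map entries just to discard the keys. A tiny sketch (hypothetical names):

    import java.util.Map;

    public class ValuesStreamDemo {
      public static void main(String[] args) {
        Map<Integer, String> cols = Map.of(0, "Term", 1, "Freq");
        // values().stream() instead of entrySet().stream().map(Map.Entry::getValue):
        String[] names = cols.values().stream().toArray(String[]::new);
        System.out.println(names.length); // 2
      }
    }
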
 
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java
index fb3a065..3fc4630 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java
@@ -33,7 +33,6 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
-import java.util.stream.Collectors;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharFilterFactory;
 import org.apache.lucene.analysis.TokenFilterFactory;
@@ -81,19 +80,17 @@
 
   @Override
   public Collection<String> getAvailableCharFilters() {
-    return CharFilterFactory.availableCharFilters().stream().sorted().collect(Collectors.toList());
+    return CharFilterFactory.availableCharFilters().stream().sorted().toList();
   }
 
   @Override
   public Collection<String> getAvailableTokenizers() {
-    return TokenizerFactory.availableTokenizers().stream().sorted().collect(Collectors.toList());
+    return TokenizerFactory.availableTokenizers().stream().sorted().toList();
   }
 
   @Override
   public Collection<String> getAvailableTokenFilters() {
-    return TokenFilterFactory.availableTokenFilters().stream()
-        .sorted()
-        .collect(Collectors.toList());
+    return TokenFilterFactory.availableTokenFilters().stream().sorted().toList();
   }
 
   @Override
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java
index a103eee..c8f03fd 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java
@@ -119,7 +119,7 @@
       return ic.getFileNames().stream()
           .map(name -> File.of(indexPath, name))
           .sorted(Comparator.comparing(File::getFileName))
-          .collect(Collectors.toList());
+          .toList();
     } catch (IOException e) {
       throw new LukeException(
           String.format(Locale.ENGLISH, "Failed to load files for commit generation %d", commitGen),
@@ -138,7 +138,7 @@
       return infos.asList().stream()
           .map(Segment::of)
           .sorted(Comparator.comparing(Segment::getName))
-          .collect(Collectors.toList());
+          .toList();
     } catch (IOException e) {
       throw new LukeException(
           String.format(
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java
index 5e5bb69..b6d4c14 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java
@@ -22,11 +22,10 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.WeakHashMap;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.misc.HighFreqTerms;
 
-/** An utility class that collects terms and their statistics in a specific field. */
+/** A utility class that collects terms and their statistics in a specific field. */
 final class TopTerms {
 
   private final IndexReader reader;
@@ -52,8 +51,7 @@
           HighFreqTerms.getHighFreqTerms(
               reader, numTerms, field, new HighFreqTerms.DocFreqComparator());
 
-      List<TermStats> topTerms =
-          Arrays.stream(stats).map(TermStats::of).collect(Collectors.toList());
+      List<TermStats> topTerms = Arrays.stream(stats).map(TermStats::of).toList();
 
       // cache computed statistics for later use
       topTermsCache.put(field, topTerms);
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java
index cd2fb87..56a942b 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java
@@ -107,7 +107,7 @@
         .map(f -> IndexUtils.getFieldInfo(reader, f))
         .filter(info -> !info.getDocValuesType().equals(DocValuesType.NONE))
         .map(info -> info.name)
-        .collect(Collectors.toList());
+        .toList();
   }
 
   @Override
@@ -116,7 +116,7 @@
         .map(f -> IndexUtils.getFieldInfo(reader, f))
         .filter(info -> !info.getIndexOptions().equals(IndexOptions.NONE))
         .map(info -> info.name)
-        .collect(Collectors.toList());
+        .toList();
   }
 
   @Override
@@ -155,7 +155,7 @@
         query = query.rewrite(searcher);
       } catch (IOException e) {
         throw new LukeException(
-            String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query.toString()), e);
+            String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query), e);
       }
     }
 
@@ -428,7 +428,7 @@
                   new SortField(name, SortField.Type.FLOAT),
                   new SortField(name, SortField.Type.DOUBLE)
                 })
-            .collect(Collectors.toList());
+            .toList();
 
       case SORTED_NUMERIC:
         return Arrays.stream(
@@ -438,7 +438,7 @@
                   new SortedNumericSortField(name, SortField.Type.FLOAT),
                   new SortedNumericSortField(name, SortField.Type.DOUBLE)
                 })
-            .collect(Collectors.toList());
+            .toList();
 
       case SORTED:
         return Arrays.stream(
@@ -446,7 +446,7 @@
                   new SortField(name, SortField.Type.STRING),
                   new SortField(name, SortField.Type.STRING_VAL)
                 })
-            .collect(Collectors.toList());
+            .toList();
 
       case SORTED_SET:
         return Collections.singletonList(new SortedSetSortField(name, false));
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java
index cab0bf5..95dbea6 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java
@@ -36,7 +36,6 @@
 import java.util.Objects;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
@@ -98,7 +97,7 @@
     // find all valid index directories in this directory
     Files.walkFileTree(
         root,
-        new SimpleFileVisitor<Path>() {
+        new SimpleFileVisitor<>() {
           @Override
           public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs)
               throws IOException {
@@ -127,7 +126,7 @@
     if (readers.size() == 1) {
       return readers.get(0);
     } else {
-      return new MultiReader(readers.toArray(new IndexReader[readers.size()]));
+      return new MultiReader(readers.toArray(new IndexReader[0]));
     }
   }
 
@@ -448,7 +447,7 @@
   public static Collection<String> getFieldNames(IndexReader reader) {
     return StreamSupport.stream(getFieldInfos(reader).spliterator(), false)
         .map(f -> f.name)
-        .collect(Collectors.toList());
+        .toList();
   }
 
   /**
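
Editor's note: the `new SimpleFileVisitor<>()` change relies on the Java 9 extension of the diamond operator to anonymous classes, valid whenever the inferred type is denotable. A sketch:

    import java.util.Comparator;

    public class DiamondAnonDemo {
      public static void main(String[] args) {
        // Pre-Java 9 this required spelling out new Comparator<String>() { ... }:
        Comparator<String> byLength = new Comparator<>() {
          @Override
          public int compare(String a, String b) {
            return Integer.compare(a.length(), b.length());
          }
        };
        System.out.println(byLength.compare("fox", "brown")); // negative
      }
    }
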
diff --git a/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties b/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties
index 6dbb609..56becab 100644
--- a/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties
+++ b/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties
@@ -184,7 +184,7 @@
 analysis.radio.preset=Preset
 analysis.radio.custom=Custom
 analysis.button.browse=Browse
-analysis.button.build_analyzser=Build Analyzer
+analysis.button.build_analyzer=Build Analyzer
 analysis.button.test=Test Analyzer
 analysis.checkbox.step_by_step=Step By Step
 analysis.hyperlink.load_jars=Load external jars
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
index 8db6c1c..6952539 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
@@ -24,7 +24,6 @@
 import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.junit.Test;
 
@@ -37,7 +36,7 @@
     assertTrue(Files.isRegularFile(path));
 
     try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) {
-      List<String> lines = br.lines().collect(Collectors.toList());
+      List<String> lines = br.lines().toList();
       assertEquals(8, lines.size());
       assertEquals("[section1]", lines.get(0));
       assertEquals("s1 = aaa", lines.get(1));
diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java
index 0a827b3..c910d73 100644
--- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java
+++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java
@@ -24,7 +24,6 @@
 import java.util.Optional;
 import java.util.function.BiConsumer;
 import java.util.function.Function;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.BytesRef;
 
@@ -67,11 +66,7 @@
 
   /** Returns a string of {@code width} spaces */
   protected String space(int width) {
-    StringBuilder sb = new StringBuilder();
-    for (int i = 0; i < width; i++) {
-      sb.append(" ");
-    }
-    return sb.toString();
+    return " ".repeat(width);
   }
 
   /** Returns a leaf node for a particular term */
@@ -149,16 +144,14 @@
   /** Returns a conjunction of a set of child nodes */
   public static QueryTree conjunction(
       List<Function<TermWeightor, QueryTree>> children, TermWeightor weightor) {
-    if (children.size() == 0) {
+    if (children.isEmpty()) {
       throw new IllegalArgumentException("Cannot build a conjunction with no children");
     }
     if (children.size() == 1) {
       return children.get(0).apply(weightor);
     }
-    List<QueryTree> qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList());
-    List<QueryTree> restricted =
-        qt.stream().filter(t -> t.weight() > 0).collect(Collectors.toList());
-    if (restricted.size() == 0) {
+    List<QueryTree> qt = children.stream().map(f -> f.apply(weightor)).toList();
+    if (qt.stream().noneMatch(t -> t.weight() > 0)) {
       // all children are ANY, so just return the first one
       return qt.get(0);
     }
@@ -172,13 +165,13 @@
   /** Returns a disjunction of a set of child nodes */
   public static QueryTree disjunction(
       List<Function<TermWeightor, QueryTree>> children, TermWeightor weightor) {
-    if (children.size() == 0) {
+    if (children.isEmpty()) {
       throw new IllegalArgumentException("Cannot build a disjunction with no children");
     }
     if (children.size() == 1) {
       return children.get(0).apply(weightor);
     }
-    List<QueryTree> qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList());
+    List<QueryTree> qt = children.stream().map(f -> f.apply(weightor)).toList();
     Optional<QueryTree> firstAnyChild = qt.stream().filter(q -> q.weight() == 0).findAny();
     // if any of the children is an ANY node, just return that, otherwise build the disjunction
     return firstAnyChild.orElseGet(() -> new DisjunctionQueryTree(qt));
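
Editor's note on two idioms from the QueryTree changes above, shown in isolation: `String.repeat` (Java 11+) replaces the StringBuilder loop, and `noneMatch` short-circuits where the old code built a filtered list only to test its size. A minimal sketch:

    import java.util.List;

    public class StreamShortcutsDemo {
      public static void main(String[] args) {
        // " ".repeat(n) instead of appending spaces in a loop:
        System.out.println("[" + " ".repeat(4) + "]"); // [    ]

        // noneMatch stops at the first positive weight:
        List<Integer> weights = List.of(0, 0, 3);
        System.out.println(weights.stream().noneMatch(w -> w > 0)); // false
      }
    }
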
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java
index 90e15a0..73bc0d6 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Query;
@@ -80,10 +79,7 @@
       subs.add(mi);
     }
     IntervalIterator it =
-        combine(
-            subs.stream()
-                .map(m -> IntervalMatches.wrapMatches(m, doc))
-                .collect(Collectors.toList()));
+        combine(subs.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList());
     if (it.advance(doc) != doc) {
       return null;
     }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java
index 7e1a701..6f68353 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java
@@ -75,7 +75,7 @@
         subIterators.add(it);
       }
     }
-    if (subIterators.size() == 0) {
+    if (subIterators.isEmpty()) {
       return null;
     }
     return new DisjunctionIntervalIterator(subIterators);
@@ -91,14 +91,12 @@
         subMatches.add(mi);
       }
     }
-    if (subMatches.size() == 0) {
+    if (subMatches.isEmpty()) {
       return null;
     }
     DisjunctionIntervalIterator it =
         new DisjunctionIntervalIterator(
-            subMatches.stream()
-                .map(m -> IntervalMatches.wrapMatches(m, doc))
-                .collect(Collectors.toList()));
+            subMatches.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList());
     if (it.advance(doc) != doc) {
       return null;
     }
@@ -170,7 +168,7 @@
       this.approximation = new DisjunctionDISIApproximation(disiQueue);
       this.iterators = iterators;
       this.intervalQueue =
-          new PriorityQueue<IntervalIterator>(iterators.size()) {
+          new PriorityQueue<>(iterators.size()) {
             @Override
             protected boolean lessThan(IntervalIterator a, IntervalIterator b) {
               return a.end() < b.end() || (a.end() == b.end() && a.start() >= b.start());
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java
index 7d37c33..c50c7fc 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java
@@ -21,7 +21,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.function.Function;
-import java.util.stream.Collectors;
 import org.apache.lucene.search.IndexSearcher;
 
 final class Disjunctions {
@@ -59,7 +58,7 @@
     if (rewritten.size() == 1) {
       return Collections.singletonList(function.apply(rewritten.get(0)));
     }
-    return rewritten.stream().map(function).collect(Collectors.toList());
+    return rewritten.stream().map(function).toList();
   }
 
   // Given a source containing disjunctions, and a mapping function,
@@ -70,7 +69,7 @@
     if (disjuncts.size() == 1) {
       return Collections.singletonList(function.apply(disjuncts.get(0)));
     }
-    return disjuncts.stream().map(function).collect(Collectors.toList());
+    return disjuncts.stream().map(function).toList();
   }
 
   // Separate out disjunctions into individual sources
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java
index d00e182..1cdb5ab 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Query;
@@ -65,9 +64,7 @@
     }
     IntervalIterator it =
         combine(
-            subs.stream()
-                .map(m -> IntervalMatches.wrapMatches(m, doc))
-                .collect(Collectors.toList()),
+            subs.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList(),
             cacheIterators(subs));
     if (it.advance(doc) != doc) {
       return null;
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
index eebdbba..6d1413e 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java
@@ -63,7 +63,7 @@
   //   0         1         2         3         4         5         6         7         8         9
   //
   // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
-  private static String[] field1_docs = {
+  private static final String[] field1_docs = {
     "Nothing of interest to anyone here",
     "Pease porridge hot, pease porridge cold, pease porridge in the pot nine days old.  Some like it hot, some like it cold, some like it in the pot nine days old",
     "Pease porridge cold, pease porridge hot, pease porridge in the pot twelve days old.  Some like it cold, some like it hot, some like it in the fraggle",
@@ -76,7 +76,7 @@
   //   0         1         2         3         4         5         6         7         8         9
   //
   // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
-  private static String[] field2_docs = {
+  private static final String[] field2_docs = {
     "In Xanadu did Kubla Khan a stately pleasure dome decree",
     "Where Alph the sacred river ran through caverns measureless to man",
     "a b a c b a b c",
@@ -88,7 +88,7 @@
 
   private static Directory directory;
   private static IndexSearcher searcher;
-  private static Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
+  private static final Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
 
   private static final FieldType FIELD_TYPE = new FieldType(TextField.TYPE_STORED);
 
@@ -202,7 +202,7 @@
           @Override
           public void consumeTerms(Query query, Term... terms) {
             visitedSources[0]++;
-            actualTerms.addAll(Arrays.stream(terms).map(Term::text).collect(Collectors.toList()));
+            actualTerms.addAll(Arrays.stream(terms).map(Term::text).toList());
           }
 
           @Override
@@ -261,9 +261,9 @@
     IllegalArgumentException e =
         expectThrows(
             IllegalArgumentException.class,
-            () -> {
-              Intervals.term("wibble").intervals("id", searcher.getIndexReader().leaves().get(0));
-            });
+            () ->
+                Intervals.term("wibble")
+                    .intervals("id", searcher.getIndexReader().leaves().get(0)));
     assertEquals(
         "Cannot create an IntervalIterator over field id because it has no indexed positions",
         e.getMessage());
@@ -454,10 +454,7 @@
     Collections.shuffle(Arrays.asList(terms), random());
 
     IntervalsSource source =
-        Intervals.or(
-            Arrays.stream(terms)
-                .map((term) -> Intervals.term(term))
-                .toArray((sz) -> new IntervalsSource[sz]));
+        Intervals.or(Arrays.stream(terms).map(Intervals::term).toArray(IntervalsSource[]::new));
     assertEquals(expected, source.toString());
   }
 
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java
index 28d117d..fe520d6 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java
@@ -24,7 +24,6 @@
 import java.util.List;
 import java.util.Spliterator;
 import java.util.function.Consumer;
-import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
@@ -93,12 +92,12 @@
   /**
    * @see #FSTCompletion(FST, boolean, boolean)
    */
-  private boolean exactFirst;
+  private final boolean exactFirst;
 
   /**
    * @see #FSTCompletion(FST, boolean, boolean)
    */
-  private boolean higherWeightsFirst;
+  private final boolean higherWeightsFirst;
 
   /**
    * Constructs an FSTCompletion, specifying higherWeightsFirst and exactFirst.
@@ -146,7 +145,7 @@
       }
 
       Collections.reverse(rootArcs); // we want highest weights first.
-      return rootArcs.toArray(new Arc[rootArcs.size()]);
+      return rootArcs.toArray(new Arc[0]);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
@@ -196,19 +195,17 @@
    *     then alphabetically (UTF-8 codepoint order).
    */
   public List<Completion> lookup(CharSequence key, int num) {
-    if (key.length() == 0 || automaton == null) {
+    if (key.isEmpty() || automaton == null) {
       return EMPTY_RESULT;
     }
 
     if (!higherWeightsFirst && rootArcs.length > 1) {
-      // We could emit a warning here (?). An optimal strategy for
-      // alphabetically sorted
+      // We could emit a warning here (?). An optimal strategy for alphabetically sorted
       // suggestions would be to add them with a constant weight -- this saves
-      // unnecessary
-      // traversals and sorting.
-      return lookup(key).sorted().limit(num).collect(Collectors.toList());
+      // unnecessary traversals and sorting.
+      return lookup(key).sorted().limit(num).toList();
     } else {
-      return lookup(key).limit(num).collect(Collectors.toList());
+      return lookup(key).limit(num).toList();
     }
   }
 
@@ -221,7 +218,7 @@
    * @return Returns the suggestions
    */
   public Stream<Completion> lookup(CharSequence key) {
-    if (key.length() == 0 || automaton == null) {
+    if (key.isEmpty() || automaton == null) {
       return Stream.empty();
     }
 
@@ -292,8 +289,8 @@
     FST.BytesReader fstReader = automaton.getBytesReader();
 
     class State {
-      Arc<Object> arc;
-      int outputLength;
+      final Arc<Object> arc;
+      final int outputLength;
 
       State(Arc<Object> arc, int outputLength) throws IOException {
         this.arc = automaton.readFirstTargetArc(arc, new Arc<>(), fstReader);
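
Editor's note: `key.isEmpty()` above works because `CharSequence` gained a default `isEmpty()` method in Java 15, so the call is valid for String, StringBuilder, and any other implementation alike. A sketch:

    public class IsEmptyDemo {
      public static void main(String[] args) {
        // Default method on CharSequence since Java 15:
        CharSequence key = new StringBuilder();
        System.out.println(key.isEmpty());   // true
        System.out.println("wit".isEmpty()); // false (String's own isEmpty)
      }
    }
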
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
index ff2e859..d4e6e17 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
@@ -23,7 +23,6 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Random;
-import java.util.stream.Collectors;
 import org.apache.lucene.search.suggest.Input;
 import org.apache.lucene.search.suggest.InputArrayIterator;
 import org.apache.lucene.search.suggest.Lookup.LookupResult;
@@ -98,7 +97,7 @@
             .sorted(
                 Comparator.comparing(
                     completion -> completion.utf8.utf8ToString().toLowerCase(Locale.ROOT)))
-            .collect(Collectors.toList());
+            .toList();
 
     assertMatchEquals(
         completions, "foundation/1", "four/0", "fourblah/1", "fourier/0", "fourty/1.0");
@@ -231,8 +230,8 @@
 
     List<LookupResult> result = lookup.lookup(stringToCharSequence("wit"), true, 5);
     assertEquals(5, result.size());
-    assertTrue(result.get(0).key.toString().equals("wit")); // exact match.
-    assertTrue(result.get(1).key.toString().equals("with")); // highest count.
+    assertEquals("wit", result.get(0).key.toString()); // exact match.
+    assertEquals("with", result.get(1).key.toString()); // highest count.
     tempDir.close();
   }
 
@@ -276,7 +275,7 @@
 
     Directory tempDir = getDirectory();
     FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "fst");
-    lookup.build(new InputArrayIterator(freqs.toArray(new Input[freqs.size()])));
+    lookup.build(new InputArrayIterator(freqs.toArray(new Input[0])));
 
     for (Input tf : freqs) {
       final String term = tf.term.utf8ToString();
@@ -315,8 +314,8 @@
                 i < result.length ? result[i] : "--"));
       }
 
-      System.err.println(b.toString());
-      fail("Expected different output:\n" + b.toString());
+      System.err.println(b);
+      fail("Expected different output:\n" + b);
     }
   }
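
Editor's note: switching `assertTrue(a.equals(b))` to `assertEquals(b, a)` above is about failure diagnostics, not behavior: assertEquals reports the expected and actual values when it fails. A hypothetical JUnit 4 sketch:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class AssertStyleDemo {
      @Test
      public void exactMatchFirst() {
        String actual = "wit";
        // On failure: java.lang.AssertionError: expected:<wit> but was:<...>
        // assertTrue(actual.equals("wit")) would fail with no message at all.
        assertEquals("wit", actual);
      }
    }
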
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java
index 870c4f5..3aefda1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java
@@ -40,7 +40,6 @@
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.function.IntToLongFunction;
-import java.util.stream.Collectors;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
@@ -121,9 +120,9 @@
   private long totalPayloadBytes;
 
   // Holds all postings:
-  private Map<String, SortedMap<BytesRef, SeedAndOrd>> fields;
+  private final Map<String, SortedMap<BytesRef, SeedAndOrd>> fields;
 
-  private FieldInfos fieldInfos;
+  private final FieldInfos fieldInfos;
 
   List<FieldAndTerm> allTerms;
   private int maxDoc;
@@ -1252,9 +1251,7 @@
           Impacts impacts = impactsEnum.getImpacts();
           INDEX_PACKAGE_ACCESS.checkImpacts(impacts, doc);
           impactsCopy =
-              impacts.getImpacts(0).stream()
-                  .map(i -> new Impact(i.freq, i.norm))
-                  .collect(Collectors.toList());
+              impacts.getImpacts(0).stream().map(i -> new Impact(i.freq, i.norm)).toList();
         }
         freq = impactsEnum.freq();
         long norm = docToNorm.applyAsLong(doc);
@@ -1301,9 +1298,7 @@
           for (int level = 0; level < impacts.numLevels(); ++level) {
             if (impacts.getDocIdUpTo(level) >= max) {
               impactsCopy =
-                  impacts.getImpacts(level).stream()
-                      .map(i -> new Impact(i.freq, i.norm))
-                      .collect(Collectors.toList());
+                  impacts.getImpacts(level).stream().map(i -> new Impact(i.freq, i.norm)).toList();
               break;
             }
           }
@@ -1342,9 +1337,7 @@
           for (int level = 0; level < impacts.numLevels(); ++level) {
             if (impacts.getDocIdUpTo(level) >= max) {
               impactsCopy =
-                  impacts.getImpacts(level).stream()
-                      .map(i -> new Impact(i.freq, i.norm))
-                      .collect(Collectors.toList());
+                  impacts.getImpacts(level).stream().map(i -> new Impact(i.freq, i.norm)).toList();
               break;
             }
           }
@@ -1370,12 +1363,12 @@
 
   private static class TestThread extends Thread {
     private Fields fieldsSource;
-    private EnumSet<Option> options;
-    private IndexOptions maxIndexOptions;
-    private IndexOptions maxTestOptions;
-    private boolean alwaysTestMax;
+    private final EnumSet<Option> options;
+    private final IndexOptions maxIndexOptions;
+    private final IndexOptions maxTestOptions;
+    private final boolean alwaysTestMax;
     private RandomPostingsTester postingsTester;
-    private Random random;
+    private final Random random;
 
     public TestThread(
         Random random,
@@ -1684,11 +1677,7 @@
       }
     }
     assertFalse(iterator.hasNext());
-    LuceneTestCase.expectThrows(
-        NoSuchElementException.class,
-        () -> {
-          iterator.next();
-        });
+    LuceneTestCase.expectThrows(NoSuchElementException.class, iterator::next);
   }
 
   /**
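
Note on the expectThrows hunk above: a statement lambda whose body is a single
method call collapses to a method reference; iterator::next satisfies a
throwing-runnable parameter the same way, with next()'s return value discarded
in both forms. A sketch against a stand-in ThrowingRunnable interface and
capture helper (both made up, since LuceneTestCase's exact signature is not
shown here):

    import java.util.Iterator;
    import java.util.List;

    @FunctionalInterface
    interface ThrowingRunnable {
      void run() throws Throwable;
    }

    static Throwable capture(ThrowingRunnable r) {
      try { r.run(); } catch (Throwable t) { return t; }
      throw new AssertionError("nothing was thrown");
    }

    // In a test method:
    Iterator<String> it = List.<String>of().iterator();
    capture(() -> { it.next(); });         // statement lambda, value discarded
    capture(it::next);                     // method reference, same behavior
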
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
index b9165be..492bf94 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/util/LuceneTestCase.java
@@ -98,7 +98,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
-import java.util.stream.Collectors;
 import junit.framework.AssertionFailedError;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
@@ -139,7 +138,6 @@
 import org.apache.lucene.index.ParallelLeafReader;
 import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.QueryTimeout;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SimpleMergedSegmentWarmer;
 import org.apache.lucene.index.SnapshotDeletionPolicy;
@@ -492,7 +490,7 @@
     boolean defaultValue = false;
     for (String property :
         Arrays.asList(
-            "tests.leaveTemporary" /* ANT tasks's (junit4) flag. */,
+            "tests.leaveTemporary" /* ANT tasks' (junit4) flag. */,
             "tests.leavetemporary" /* lowercase */,
             "tests.leavetmpdir" /* default */)) {
       defaultValue |= systemPropertyAsBoolean(property, false);
@@ -565,7 +563,7 @@
   protected static TestRuleMarkFailure suiteFailureMarker;
 
   /** Temporary files cleanup rule. */
-  private static TestRuleTemporaryFilesCleanup tempFilesCleanupRule;
+  private static final TestRuleTemporaryFilesCleanup tempFilesCleanupRule;
 
   /**
    * Ignore tests after hitting a designated number of initial failures. This is truly a "static"
@@ -667,13 +665,14 @@
   // -----------------------------------------------------------------
 
   /** Enforces {@link #setUp()} and {@link #tearDown()} calls are chained. */
-  private TestRuleSetupTeardownChained parentChainCallRule = new TestRuleSetupTeardownChained();
+  private final TestRuleSetupTeardownChained parentChainCallRule =
+      new TestRuleSetupTeardownChained();
 
   /** Save test thread and name. */
-  private TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
+  private final TestRuleThreadAndTestName threadAndTestNameRule = new TestRuleThreadAndTestName();
 
   /** Taint suite result with individual test failures. */
-  private TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
+  private final TestRuleMarkFailure testFailureMarker = new TestRuleMarkFailure(suiteFailureMarker);
 
   /**
    * This controls how individual test rules are nested. It is important that _all_ rules declared
@@ -687,13 +686,13 @@
           .around(new TestRuleSetupAndRestoreInstanceEnv())
           .around(parentChainCallRule);
 
-  private static final Map<String, FieldType> fieldToType = new HashMap<String, FieldType>();
+  private static final Map<String, FieldType> fieldToType = new HashMap<>();
 
   enum LiveIWCFlushMode {
     BY_RAM,
     BY_DOCS,
     EITHER
-  };
+  }
 
   /** Set by TestRuleSetupAndRestoreClassEnv */
   static LiveIWCFlushMode liveIWCFlushMode;
@@ -1270,7 +1269,7 @@
         }
       } else {
         // but just in case of something ridiculous...
-        diff.append(current.toString());
+        diff.append(current);
       }
 
       // its possible to be empty, if we "change" a value to what it had before.
@@ -1394,8 +1393,7 @@
       }
 
       Directory fsdir = newFSDirectoryImpl(clazz, f, lf);
-      BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare, true);
-      return wrapped;
+      return wrapDirectory(random(), fsdir, bare, true);
     } catch (Exception e) {
       Rethrow.rethrow(e);
       throw null; // dummy to prevent compiler failure
@@ -1894,7 +1892,7 @@
             threads,
             0L,
             TimeUnit.MILLISECONDS,
-            new LinkedBlockingQueue<Runnable>(),
+            new LinkedBlockingQueue<>(),
             new NamedThreadFactory("LuceneTestCase"));
     // uncomment to intensify LUCENE-3840
     // executor.prestartAllCoreThreads();
@@ -2014,13 +2012,7 @@
       ret.setSimilarity(classEnvRule.similarity);
       ret.setQueryCachingPolicy(MAYBE_CACHE_POLICY);
       if (random().nextBoolean()) {
-        ret.setTimeout(
-            new QueryTimeout() {
-              @Override
-              public boolean shouldExit() {
-                return false;
-              }
-            });
+        ret.setTimeout(() -> false);
       }
       return ret;
     }
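
Note on the hunk above: an interface with a single abstract method accepts a
lambda, so the anonymous class reduces to () -> false, assuming shouldExit()
is QueryTimeout's only abstract method (as the removed override suggests).
That also explains the dropped org.apache.lucene.index.QueryTimeout import
earlier in this file's diff: a lambda never names its target type at the call
site. Generic sketch with a made-up Timeout interface and setTimeout method:

    @FunctionalInterface
    interface Timeout {
      boolean shouldExit();
    }

    static void setTimeout(Timeout t) { /* ... */ }

    // At the call site the target type is inferred from the parameter,
    // so no import of Timeout is needed:
    setTimeout(() -> false);
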
@@ -2340,7 +2332,7 @@
     int numPasses = 0;
     while (numPasses < 10 && tests.size() < numTests) {
       leftEnum = leftTerms.iterator();
-      BytesRef term = null;
+      BytesRef term;
       while ((term = leftEnum.next()) != null) {
         int code = random.nextInt(10);
         if (code == 0) {
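
Note on the hunk above: the "= null" initializer was dead code. The while
condition assigns term before any read, and javac's definite-assignment
analysis accepts the declaration without an initializer. Same shape in a
standalone sketch (reader is an assumed java.io.BufferedReader, inside a
method declared to throw IOException):

    String line;                           // no "= null" needed
    while ((line = reader.readLine()) != null) {
      // line is definitely assigned by the loop test before this point
    }
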
@@ -2446,17 +2438,11 @@
       // in whatever way it wants (e.g. maybe it packs related fields together or something)
       // To fix this, we sort the fields in both documents by name, but
       // we still assume that all instances with same name are in order:
-      Comparator<IndexableField> comp =
-          new Comparator<IndexableField>() {
-            @Override
-            public int compare(IndexableField arg0, IndexableField arg1) {
-              return arg0.name().compareTo(arg1.name());
-            }
-          };
+      Comparator<IndexableField> comp = Comparator.comparing(IndexableField::name);
       List<IndexableField> leftFields = new ArrayList<>(leftDoc.getFields());
       List<IndexableField> rightFields = new ArrayList<>(rightDoc.getFields());
-      Collections.sort(leftFields, comp);
-      Collections.sort(rightFields, comp);
+      leftFields.sort(comp);
+      rightFields.sort(comp);
 
       Iterator<IndexableField> leftIterator = leftFields.iterator();
       Iterator<IndexableField> rightIterator = rightFields.iterator();
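
Note on the hunk above: Comparator.comparing builds a key-extractor comparator
in one expression, and List.sort supersedes the static Collections.sort helper.
Standalone sketch with a stand-in Field record (not Lucene's IndexableField):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    record Field(String name) {}

    List<Field> fields = new ArrayList<>(List.of(new Field("b"), new Field("a")));
    fields.sort(Comparator.comparing(Field::name));    // now ordered [a, b]
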
@@ -2724,7 +2710,7 @@
             public void visit(int docID, byte[] packedValue) throws IOException {
               int topDocID = ctx.docBase + docID;
               if (docValues.containsKey(topDocID) == false) {
-                docValues.put(topDocID, new HashSet<BytesRef>());
+                docValues.put(topDocID, new HashSet<>());
               }
               docValues.get(topDocID).add(new BytesRef(packedValue.clone()));
             }
@@ -2878,8 +2864,7 @@
       }
     }
 
-    List<String> exceptionTypes =
-        expectedTypes.stream().map(c -> c.getSimpleName()).collect(Collectors.toList());
+    List<String> exceptionTypes = expectedTypes.stream().map(Class::getSimpleName).toList();
 
     if (thrown != null) {
       AssertionFailedError assertion =
@@ -2947,12 +2932,11 @@
       LinkedHashMap<Class<? extends TO>, List<Class<? extends TW>>> expectedOuterToWrappedTypes,
       ThrowingRunnable runnable) {
     final List<Class<? extends TO>> outerClasses =
-        expectedOuterToWrappedTypes.keySet().stream().collect(Collectors.toList());
+        new ArrayList<>(expectedOuterToWrappedTypes.keySet());
     final Throwable thrown = _expectThrows(outerClasses, runnable);
 
     if (null == thrown) {
-      List<String> outerTypes =
-          outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList());
+      List<String> outerTypes = outerClasses.stream().map(Class::getSimpleName).toList();
       throw new AssertionFailedError(
           "Expected any of the following outer exception types: "
               + outerTypes
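
Note on the hunk above: when the source is already a Collection, the ArrayList
copy constructor expresses keySet().stream().collect(Collectors.toList()) more
directly, and unlike .toList() it yields a list the caller may still mutate.
Sketch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    Map<String, Integer> map = Map.of("a", 1, "b", 2);
    List<String> viaStream = map.keySet().stream().collect(Collectors.toList());
    List<String> viaCopy   = new ArrayList<>(map.keySet());   // same contents, one call
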
@@ -2973,7 +2957,7 @@
             }
           }
           List<String> wrappedTypes =
-              expectedWrappedTypes.stream().map(Class::getSimpleName).collect(Collectors.toList());
+              expectedWrappedTypes.stream().map(Class::getSimpleName).toList();
           AssertionFailedError assertion =
               new AssertionFailedError(
                   "Unexpected wrapped exception type, expected one of "
@@ -2985,8 +2969,7 @@
         }
       }
     }
-    List<String> outerTypes =
-        outerClasses.stream().map(Class::getSimpleName).collect(Collectors.toList());
+    List<String> outerTypes = outerClasses.stream().map(Class::getSimpleName).toList();
     AssertionFailedError assertion =
         new AssertionFailedError(
             "Unexpected outer exception type, expected one of "
diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/util/RamUsageTester.java b/lucene/test-framework/src/java/org/apache/lucene/tests/util/RamUsageTester.java
index 91d8f0c..7e793f2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/tests/util/RamUsageTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/tests/util/RamUsageTester.java
@@ -83,7 +83,7 @@
    * referenced objects.
    *
    * <p><b>Resource Usage:</b> This method internally uses a set of every object seen during
-   * traversals so it does allocate memory (it isn't side-effect free). After the method exits, this
+   * traversals so it does allocate memory (it isn't side-effect-free). After the method exits, this
    * memory should be GCed.
    */
   public static long ramUsed(Object obj, Accumulator accumulator) {
@@ -113,7 +113,7 @@
    */
   private static long measureObjectSize(Object root, Accumulator accumulator) {
     // Objects seen so far.
-    final Set<Object> seen = Collections.newSetFromMap(new IdentityHashMap<Object, Boolean>());
+    final Set<Object> seen = Collections.newSetFromMap(new IdentityHashMap<>());
     // Class cache with reference Field and precalculated shallow size.
     final IdentityHashMap<Class<?>, ClassCache> classCache = new IdentityHashMap<>();
     // Stack of objects pending traversal. Recursion caused stack overflows.
@@ -194,9 +194,7 @@
       } else if (isJavaModule.test(obClazz) && ob instanceof Map) {
         final List<Object> values =
             ((Map<?, ?>) ob)
-                .entrySet().stream()
-                    .flatMap(e -> Stream.of(e.getKey(), e.getValue()))
-                    .collect(Collectors.toList());
+                .entrySet().stream().flatMap(e -> Stream.of(e.getKey(), e.getValue())).toList();
         return accumulator.accumulateArray(
                 ob,
                 alignedShallowInstanceSize + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER,
@@ -246,7 +244,7 @@
       values = Collections.emptyList();
     } else {
       values =
-          new AbstractList<Object>() {
+          new AbstractList<>() {
 
             @Override
             public Object get(int index) {
@@ -267,10 +265,9 @@
   * their public properties. This is needed for Java 9, which does not allow looking into runtime
    * class fields.
    */
-  @SuppressWarnings("serial")
   private static final Map<Class<?>, ToLongFunction<Object>> SIMPLE_TYPES =
       Collections.unmodifiableMap(
-          new IdentityHashMap<Class<?>, ToLongFunction<Object>>() {
+          new IdentityHashMap<>() {
             {
               init();
             }
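
Note on the hunk above: since Java 9 the diamond operator is legal on anonymous
classes whenever the inferred type argument is denotable, which is what lets
new IdentityHashMap<>() { ... } keep its instance-initializer body without
restating the type parameters. Sketch (names is a made-up example map):

    import java.util.IdentityHashMap;
    import java.util.Map;

    Map<Class<?>, String> names =
        new IdentityHashMap<>() {          // <> on an anonymous class: Java 9+
          {
            put(int.class, "int");         // instance-initializer ("double brace") idiom
            put(long.class, "long");
          }
        };
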
@@ -363,7 +360,7 @@
                   cachedInfo =
                       new ClassCache(
                           RamUsageEstimator.alignObjectSize(shallowInstanceSize),
-                          referenceFields.toArray(new Field[referenceFields.size()]));
+                          referenceFields.toArray(new Field[0]));
                   return cachedInfo;
                 });
     return classCache;
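
Note on the final hunk: toArray(new Field[0]) is the generally recommended form
of this idiom. On current HotSpot the zero-length-array variant is reported to
be at least as fast as presizing (the VM intrinsifies the copy), it avoids a
separate size() call, and it cannot under-allocate if the collection shrinks
between the two calls. Sketch:

    import java.lang.reflect.Field;
    import java.util.List;

    List<Field> referenceFields = List.of();
    Field[] presized = referenceFields.toArray(new Field[referenceFields.size()]);
    Field[] viaZero  = referenceFields.toArray(new Field[0]);   // same result, simpler
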