Merge branch 'master' into jira/solr15094
merging with master
diff --git a/.github/workflows/docker-test.yml b/.github/workflows/docker-test.yml
index 76fa714..d331df9 100644
--- a/.github/workflows/docker-test.yml
+++ b/.github/workflows/docker-test.yml
@@ -38,6 +38,8 @@
         restore-keys: |
           ${{ runner.os }}-gradle-docker-
           ${{ runner.os }}-gradle-
+    - name: Initialize gradle settings
+      run: ./gradlew localSettings
     - name: Build Docker image with Gradle
       run: ./gradlew solr:docker:docker
     - name: Run tests on Docker image
diff --git a/build.gradle b/build.gradle
index 20e232d..37e52cd 100644
--- a/build.gradle
+++ b/build.gradle
@@ -25,7 +25,6 @@
   id 'de.thetaphi.forbiddenapis' version '3.1' apply false
   id "de.undercouch.download" version "4.0.2" apply false
   id "net.ltgt.errorprone" version "1.2.1" apply false
-  id "com.palantir.docker" version "0.25.0" apply false
   id 'com.diffplug.spotless' version "5.8.2" apply false
 }
 
diff --git a/gradle/help.gradle b/gradle/help.gradle
index 161f072..5fcec75 100644
--- a/gradle/help.gradle
+++ b/gradle/help.gradle
@@ -30,7 +30,7 @@
       ["Git", "help/git.txt", "Git assistance and guides."],
       ["ValidateLogCalls", "help/validateLogCalls.txt", "How to use logging calls efficiently."],
       ["IDEs", "help/IDEs.txt", "IDE support."],
-      ["Docker", "help/docker.txt", "Building Solr Docker images."],
+      ["Docker", "solr/docker/gradle-help.txt", "Building Solr Docker images."],
   ]
 
   helpFiles.each { section, path, sectionInfo ->
diff --git a/gradle/maven/defaults-maven.gradle b/gradle/maven/defaults-maven.gradle
index 443d690..2662a69 100644
--- a/gradle/maven/defaults-maven.gradle
+++ b/gradle/maven/defaults-maven.gradle
@@ -66,7 +66,6 @@
         ":solr:contrib:langid",
         ":solr:contrib:jaegertracer-configurator",
         ":solr:contrib:prometheus-exporter",
-        ":solr:contrib:scripting",
         ":solr:test-framework",
     ]
 
diff --git a/help/docker.txt b/help/docker.txt
deleted file mode 100644
index c86572c..0000000
--- a/help/docker.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-Docker Images for Solr
-======================
-
-Solr docker images are built using Palantir's Docker Gradle plugin, https://github.com/palantir/gradle-docker.
-
-Common Inputs
--------------
-
-The docker image and its tag can be customized via the following options, all accepted via both Environment Variables and Gradle Properties.
-
-Docker Image Repository:
-   Default: "apache/solr"
-   EnvVar: SOLR_DOCKER_IMAGE_REPO
-   Gradle Property: -Psolr.docker.imageRepo
-
-Docker Image Tag:
-   Default: the Solr version, e.g. "9.0.0-SNAPSHOT"
-   EnvVar: SOLR_DOCKER_IMAGE_TAG
-   Gradle Property: -Psolr.docker.imageTag
-
-Docker Image Name: (Use this to explicitly set a whole image name. If given, the image repo and image version options above are ignored.)
-   Default: {image_repo}/{image_tag} (both options provided above, with defaults)
-   EnvVar: SOLR_DOCKER_IMAGE_NAME
-   Gradle Property: -Psolr.docker.imageName
-
-Building
---------
-
-In order to build the Solr Docker image, run:
-
-gradlew docker
-
-The docker build task accepts the following inputs, in addition to the common inputs listed above:
-
-Base Docker Image: (The docker image used for the "FROM" in the Solr Dockerfile)
-   Default: "openjdk:11-jre-slim"
-   EnvVar: SOLR_DOCKER_BASE_IMAGE
-   Gradle Property: -Psolr.docker.baseImage
-
-Github URL or Mirror: (The URL of github or a mirror of github releases. This is of use when building the docker image behind a firewall that does not have access to external Github.)
-   Default: "github.com"
-   EnvVar: SOLR_DOCKER_GITHUB_URL
-   Gradle Property: -Psolr.docker.githubUrl
-
-Testing
--------
-
-To test the docker image, run:
-
-gradlew dockerTest
-
-If a custom docker image name was used, via one of the common inputs described above, then the same input must be used while testing.
-
-You can also specify an explicit list of tests to run, or an explicit list of tests to ignore.
-Both inputs are optional, and by default all tests will be run.
-
-gradlew testDocker --tests create_core,demo
-gradlew testDocker --ignore demo-tini,initdb
\ No newline at end of file
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index fce70e9..f99553b 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -86,8 +86,8 @@
 
 Improvements
 
-* LUCENE-9665 LUCENE-9676 LUCENE-9667 : Hunspell improvements: add SpellChecker API, support default encoding and
-  BREAK/FORBIDDENWORD affix rules, improve stemming of all-caps words (Peter Gromov)
+* LUCENE-9687: Hunspell support improvements: add SpellChecker API, support default encoding and
+  BREAK/FORBIDDENWORD/COMPOUNDRULE affix rules, improve stemming of all-caps words (Peter Gromov)
 
 * LUCENE-9633: Improve match highlighter behavior for degenerate intervals (on non-existing positions).
   (Dawid Weiss)
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
new file mode 100644
index 0000000..0f89de8
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/CompoundRule.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.util.List;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
+
+class CompoundRule {
+  private final char[] data;
+  private final Dictionary dictionary;
+
+  CompoundRule(String rule, Dictionary dictionary) {
+    this.dictionary = dictionary;
+    StringBuilder parsedFlags = new StringBuilder();
+    int pos = 0;
+    while (pos < rule.length()) {
+      int lParen = rule.indexOf("(", pos);
+      if (lParen < 0) {
+        parsedFlags.append(dictionary.flagParsingStrategy.parseFlags(rule.substring(pos)));
+        break;
+      }
+
+      parsedFlags.append(dictionary.flagParsingStrategy.parseFlags(rule.substring(pos, lParen)));
+      int rParen = rule.indexOf(')', lParen + 1);
+      if (rParen < 0) {
+        throw new IllegalArgumentException("Unmatched parentheses: " + rule);
+      }
+
+      parsedFlags.append(
+          dictionary.flagParsingStrategy.parseFlags(rule.substring(lParen + 1, rParen)));
+      pos = rParen + 1;
+      if (pos < rule.length() && (rule.charAt(pos) == '?' || rule.charAt(pos) == '*')) {
+        parsedFlags.append(rule.charAt(pos++));
+      }
+    }
+    data = parsedFlags.toString().toCharArray();
+  }
+
+  boolean mayMatch(List<IntsRef> words, BytesRef scratch) {
+    return match(words, 0, 0, scratch, false);
+  }
+
+  boolean fullyMatches(List<IntsRef> words, BytesRef scratch) {
+    return match(words, 0, 0, scratch, true);
+  }
+
+  private boolean match(
+      List<IntsRef> words, int patternIndex, int wordIndex, BytesRef scratch, boolean fully) {
+    if (patternIndex >= data.length) {
+      return wordIndex >= words.size();
+    }
+    if (wordIndex >= words.size() && !fully) {
+      return true;
+    }
+
+    char flag = data[patternIndex];
+    if (patternIndex < data.length - 1 && data[patternIndex + 1] == '*') {
+      int startWI = wordIndex;
+      while (wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch)) {
+        wordIndex++;
+      }
+
+      while (wordIndex >= startWI) {
+        if (match(words, patternIndex + 2, wordIndex, scratch, fully)) {
+          return true;
+        }
+
+        wordIndex--;
+      }
+      return false;
+    }
+
+    boolean currentWordMatches =
+        wordIndex < words.size() && dictionary.hasFlag(words.get(wordIndex), flag, scratch);
+
+    if (patternIndex < data.length - 1 && data[patternIndex + 1] == '?') {
+      if (currentWordMatches && match(words, patternIndex + 2, wordIndex + 1, scratch, fully)) {
+        return true;
+      }
+      return match(words, patternIndex + 2, wordIndex, scratch, fully);
+    }
+
+    return currentWordMatches && match(words, patternIndex + 1, wordIndex + 1, scratch, fully);
+  }
+
+  @Override
+  public String toString() {
+    return new String(data);
+  }
+}
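
A COMPOUNDRULE pattern behaves like a small regular expression over the flags of the
candidate compound parts: each plain flag must match one word, a trailing '*' matches
zero or more words carrying the preceding flag (with backtracking), and '?' makes the
flag optional. A minimal standalone sketch of the same matching idea, one flag
character per word (illustrative names, not the Lucene API):

    class CompoundRuleSketch {
      // Mirrors CompoundRule.match with fully=true: p is the parsed pattern,
      // flags holds one flag char per compound part.
      static boolean matches(char[] p, int pi, String flags, int wi) {
        if (pi >= p.length) return wi >= flags.length();
        char flag = p[pi];
        if (pi < p.length - 1 && p[pi + 1] == '*') {
          int start = wi; // consume greedily, then backtrack
          while (wi < flags.length() && flags.charAt(wi) == flag) wi++;
          for (; wi >= start; wi--) {
            if (matches(p, pi + 2, flags, wi)) return true;
          }
          return false;
        }
        boolean cur = wi < flags.length() && flags.charAt(wi) == flag;
        if (pi < p.length - 1 && p[pi + 1] == '?') {
          return cur && matches(p, pi + 2, flags, wi + 1) || matches(p, pi + 2, flags, wi);
        }
        return cur && matches(p, pi + 1, flags, wi + 1);
      }

      public static void main(String[] args) {
        assert matches("n*1t".toCharArray(), 0, "nn1t", 0); // 'n' twice, then '1', then 't'
        assert matches("n*1t".toCharArray(), 0, "1t", 0);   // '*' may match zero parts
        assert !matches("n*1t".toCharArray(), 0, "1tn", 0); // trailing part breaks the match
      }
    }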
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
index 19cfaa3..5b674e2 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
@@ -76,31 +76,10 @@
 
   static final char[] NOFLAGS = new char[0];
 
+  static final int FLAG_UNSET = 0;
+  private static final int DEFAULT_FLAGS = 65510;
   private static final char HIDDEN_FLAG = (char) 65511; // called 'ONLYUPCASEFLAG' in Hunspell
 
-  private static final String ALIAS_KEY = "AF";
-  private static final String MORPH_ALIAS_KEY = "AM";
-  private static final String PREFIX_KEY = "PFX";
-  private static final String SUFFIX_KEY = "SFX";
-  private static final String FLAG_KEY = "FLAG";
-  private static final String COMPLEXPREFIXES_KEY = "COMPLEXPREFIXES";
-  private static final String CIRCUMFIX_KEY = "CIRCUMFIX";
-  private static final String IGNORE_KEY = "IGNORE";
-  private static final String ICONV_KEY = "ICONV";
-  private static final String OCONV_KEY = "OCONV";
-  private static final String FULLSTRIP_KEY = "FULLSTRIP";
-  private static final String LANG_KEY = "LANG";
-  private static final String BREAK_KEY = "BREAK";
-  private static final String FORBIDDENWORD_KEY = "FORBIDDENWORD";
-  private static final String KEEPCASE_KEY = "KEEPCASE";
-  private static final String NEEDAFFIX_KEY = "NEEDAFFIX";
-  private static final String PSEUDOROOT_KEY = "PSEUDOROOT";
-  private static final String ONLYINCOMPOUND_KEY = "ONLYINCOMPOUND";
-
-  private static final String NUM_FLAG_TYPE = "num";
-  private static final String UTF8_FLAG_TYPE = "UTF-8";
-  private static final String LONG_FLAG_TYPE = "long";
-
   // TODO: really for suffixes we should reverse the automaton and run them backwards
   private static final String PREFIX_CONDITION_REGEX_PATTERN = "%s.*";
   private static final String SUFFIX_CONDITION_REGEX_PATTERN = ".*%s";
@@ -136,7 +115,7 @@
   static final int AFFIX_APPEND = 3;
 
   // Default flag parsing strategy
-  private FlagParsingStrategy flagParsingStrategy = new SimpleFlagParsingStrategy();
+  FlagParsingStrategy flagParsingStrategy = new SimpleFlagParsingStrategy();
 
   // AF entries
   private String[] aliases;
@@ -158,11 +137,13 @@
   // if no affixes have continuation classes, no need to do 2-level affix stripping
   boolean twoStageAffix;
 
-  int circumfix = -1; // circumfix flag, or -1 if one is not defined
-  int keepcase = -1; // keepcase flag, or -1 if one is not defined
-  int needaffix = -1; // needaffix flag, or -1 if one is not defined
-  int forbiddenword = -1; // forbiddenword flag, or -1 if one is not defined
-  int onlyincompound = -1; // onlyincompound flag, or -1 if one is not defined
+  char circumfix;
+  char keepcase;
+  char needaffix;
+  char forbiddenword;
+  char onlyincompound;
+  int compoundMin = 3;
+  List<CompoundRule> compoundRules; // nullable
 
   // ignored characters (dictionary, affix, inputs)
   private char[] ignore;
@@ -342,83 +323,62 @@
       if (reader.getLineNumber() == 1 && line.startsWith("\uFEFF")) {
         line = line.substring(1);
       }
-      if (line.startsWith(ALIAS_KEY)) {
+      line = line.trim();
+      if (line.isEmpty()) continue;
+
+      String firstWord = line.split("\\s")[0];
+      if ("AF".equals(firstWord)) {
         parseAlias(line);
-      } else if (line.startsWith(MORPH_ALIAS_KEY)) {
+      } else if ("AM".equals(firstWord)) {
         parseMorphAlias(line);
-      } else if (line.startsWith(PREFIX_KEY)) {
+      } else if ("PFX".equals(firstWord)) {
         parseAffix(
             prefixes, line, reader, PREFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
-      } else if (line.startsWith(SUFFIX_KEY)) {
+      } else if ("SFX".equals(firstWord)) {
         parseAffix(
             suffixes, line, reader, SUFFIX_CONDITION_REGEX_PATTERN, seenPatterns, seenStrips);
-      } else if (line.startsWith(FLAG_KEY)) {
+      } else if ("FLAG".equals(firstWord)) {
         // Assume that the FLAG line comes before any prefixes or suffixes
         // Store the strategy so it can be used when parsing the dic file
         flagParsingStrategy = getFlagParsingStrategy(line);
-      } else if (line.equals(COMPLEXPREFIXES_KEY)) {
+      } else if (line.equals("COMPLEXPREFIXES")) {
         complexPrefixes =
             true; // 2-stage prefix+1-stage suffix instead of 2-stage suffix+1-stage prefix
-      } else if (line.startsWith(CIRCUMFIX_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal CIRCUMFIX declaration", reader.getLineNumber());
-        }
-        circumfix = flagParsingStrategy.parseFlag(parts[1]);
-      } else if (line.startsWith(KEEPCASE_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal KEEPCASE declaration", reader.getLineNumber());
-        }
-        keepcase = flagParsingStrategy.parseFlag(parts[1]);
-      } else if (line.startsWith(NEEDAFFIX_KEY) || line.startsWith(PSEUDOROOT_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal NEEDAFFIX declaration", reader.getLineNumber());
-        }
-        needaffix = flagParsingStrategy.parseFlag(parts[1]);
-      } else if (line.startsWith(ONLYINCOMPOUND_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal ONLYINCOMPOUND declaration", reader.getLineNumber());
-        }
-        onlyincompound = flagParsingStrategy.parseFlag(parts[1]);
-      } else if (line.startsWith(IGNORE_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal IGNORE declaration", reader.getLineNumber());
-        }
-        ignore = parts[1].toCharArray();
+      } else if ("CIRCUMFIX".equals(firstWord)) {
+        circumfix = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("KEEPCASE".equals(firstWord)) {
+        keepcase = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("NEEDAFFIX".equals(firstWord) || "PSEUDOROOT".equals(firstWord)) {
+        needaffix = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("ONLYINCOMPOUND".equals(firstWord)) {
+        onlyincompound = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("IGNORE".equals(firstWord)) {
+        ignore = singleArgument(reader, line).toCharArray();
         Arrays.sort(ignore);
         needsInputCleaning = true;
-      } else if (line.startsWith(ICONV_KEY) || line.startsWith(OCONV_KEY)) {
-        String[] parts = line.split("\\s+");
-        String type = parts[0];
-        if (parts.length != 2) {
-          throw new ParseException("Illegal " + type + " declaration", reader.getLineNumber());
-        }
-        int num = Integer.parseInt(parts[1]);
+      } else if ("ICONV".equals(firstWord) || "OCONV".equals(firstWord)) {
+        int num = Integer.parseInt(singleArgument(reader, line));
         FST<CharsRef> res = parseConversions(reader, num);
-        if (type.equals("ICONV")) {
+        if (line.startsWith("I")) {
           iconv = res;
           needsInputCleaning |= iconv != null;
         } else {
           oconv = res;
           needsOutputCleaning |= oconv != null;
         }
-      } else if (line.startsWith(FULLSTRIP_KEY)) {
+      } else if ("FULLSTRIP".equals(firstWord)) {
         fullStrip = true;
-      } else if (line.startsWith(LANG_KEY)) {
-        language = line.substring(LANG_KEY.length()).trim();
+      } else if ("LANG".equals(firstWord)) {
+        language = singleArgument(reader, line);
         alternateCasing = "tr_TR".equals(language) || "az_AZ".equals(language);
-      } else if (line.startsWith(BREAK_KEY)) {
+      } else if ("BREAK".equals(firstWord)) {
         breaks = parseBreaks(reader, line);
-      } else if (line.startsWith(FORBIDDENWORD_KEY)) {
-        String[] parts = line.split("\\s+");
-        if (parts.length != 2) {
-          throw new ParseException("Illegal FORBIDDENWORD declaration", reader.getLineNumber());
-        }
-        forbiddenword = flagParsingStrategy.parseFlag(parts[1]);
+      } else if ("FORBIDDENWORD".equals(firstWord)) {
+        forbiddenword = flagParsingStrategy.parseFlag(singleArgument(reader, line));
+      } else if ("COMPOUNDMIN".equals(firstWord)) {
+        compoundMin = Math.max(1, Integer.parseInt(singleArgument(reader, line)));
+      } else if ("COMPOUNDRULE".equals(firstWord)) {
+        compoundRules = parseCompoundRules(reader, Integer.parseInt(singleArgument(reader, line)));
       }
     }
 
@@ -442,19 +402,37 @@
     stripOffsets[currentIndex] = currentOffset;
   }
 
+  private String singleArgument(LineNumberReader reader, String line) throws ParseException {
+    return splitBySpace(reader, line, 2)[1];
+  }
+
+  private String[] splitBySpace(LineNumberReader reader, String line, int expectedParts)
+      throws ParseException {
+    String[] parts = line.split("\\s+");
+    if (parts.length < expectedParts
+        || parts.length > expectedParts && !parts[expectedParts].startsWith("#")) {
+      throw new ParseException("Invalid syntax", reader.getLineNumber());
+    }
+    return parts;
+  }
+
+  private List<CompoundRule> parseCompoundRules(LineNumberReader reader, int num)
+      throws IOException, ParseException {
+    List<CompoundRule> compoundRules = new ArrayList<>();
+    for (int i = 0; i < num; i++) {
+      compoundRules.add(new CompoundRule(singleArgument(reader, reader.readLine()), this));
+    }
+    return compoundRules;
+  }
+
   private Breaks parseBreaks(LineNumberReader reader, String line)
       throws IOException, ParseException {
     Set<String> starting = new LinkedHashSet<>();
     Set<String> ending = new LinkedHashSet<>();
     Set<String> middle = new LinkedHashSet<>();
-    int num = Integer.parseInt(line.substring(BREAK_KEY.length()).trim());
+    int num = Integer.parseInt(singleArgument(reader, line));
     for (int i = 0; i < num; i++) {
-      line = reader.readLine();
-      String[] parts = line.split("\\s+");
-      if (!line.startsWith(BREAK_KEY) || parts.length != 2) {
-        throw new ParseException("BREAK chars expected", reader.getLineNumber());
-      }
-      String breakStr = parts[1];
+      String breakStr = singleArgument(reader, reader.readLine());
       if (breakStr.startsWith("^")) {
         starting.add(breakStr.substring(1));
       } else if (breakStr.endsWith("$")) {
@@ -658,11 +636,7 @@
     Map<String, String> mappings = new TreeMap<>();
 
     for (int i = 0; i < num; i++) {
-      String line = reader.readLine();
-      String[] parts = line.split("\\s+");
-      if (parts.length != 3) {
-        throw new ParseException("invalid syntax: " + line, reader.getLineNumber());
-      }
+      String[] parts = splitBySpace(reader, reader.readLine(), 3);
       if (mappings.put(parts[1], parts[2]) != null) {
         throw new IllegalStateException("duplicate mapping specified for: " + parts[1]);
       }
@@ -758,11 +732,11 @@
     }
     String flagType = parts[1];
 
-    if (NUM_FLAG_TYPE.equals(flagType)) {
+    if ("num".equals(flagType)) {
       return new NumFlagParsingStrategy();
-    } else if (UTF8_FLAG_TYPE.equals(flagType)) {
+    } else if ("UTF-8".equals(flagType)) {
       return new SimpleFlagParsingStrategy();
-    } else if (LONG_FLAG_TYPE.equals(flagType)) {
+    } else if ("long".equals(flagType)) {
       return new DoubleASCIIFlagParsingStrategy();
     }
 
@@ -910,7 +884,7 @@
       reuse.append(caseFold(word.charAt(i)));
     }
     reuse.append(FLAG_SEPARATOR);
-    reuse.append(HIDDEN_FLAG);
+    flagParsingStrategy.appendFlag(HIDDEN_FLAG, reuse);
     reuse.append(afterSep, afterSep.charAt(0) == FLAG_SEPARATOR ? 1 : 0, afterSep.length());
     writer.write(reuse.toString().getBytes(StandardCharsets.UTF_8));
   }
@@ -1188,16 +1162,19 @@
     return null;
   }
 
-  boolean isForbiddenWord(char[] word, BytesRef scratch) {
-    if (forbiddenword != -1) {
-      IntsRef forms = lookupWord(word, 0, word.length);
-      if (forms != null) {
-        int formStep = formStep();
-        for (int i = 0; i < forms.length; i += formStep) {
-          if (hasFlag(forms.ints[forms.offset + i], (char) forbiddenword, scratch)) {
-            return true;
-          }
-        }
+  boolean isForbiddenWord(char[] word, int length, BytesRef scratch) {
+    if (forbiddenword != FLAG_UNSET) {
+      IntsRef forms = lookupWord(word, 0, length);
+      return forms != null && hasFlag(forms, forbiddenword, scratch);
+    }
+    return false;
+  }
+
+  boolean hasFlag(IntsRef forms, char flag, BytesRef scratch) {
+    int formStep = formStep();
+    for (int i = 0; i < forms.length; i += formStep) {
+      if (hasFlag(forms.ints[forms.offset + i], flag, scratch)) {
+        return true;
       }
     }
     return false;
@@ -1227,6 +1204,8 @@
      * @return Parsed flags
      */
     abstract char[] parseFlags(String rawFlags);
+
+    abstract void appendFlag(char flag, StringBuilder to);
   }
 
   /**
@@ -1238,6 +1217,11 @@
     public char[] parseFlags(String rawFlags) {
       return rawFlags.toCharArray();
     }
+
+    @Override
+    void appendFlag(char flag, StringBuilder to) {
+      to.append(flag);
+    }
   }
 
   /**
@@ -1258,7 +1242,12 @@
         if (replacement.isEmpty()) {
           continue;
         }
-        flags[upto++] = (char) Integer.parseInt(replacement);
+        int flag = Integer.parseInt(replacement);
+        if (flag == FLAG_UNSET || flag >= Character.MAX_VALUE) { // read default flags as well
+          throw new IllegalArgumentException(
+              "Num flags should be between 0 and " + DEFAULT_FLAGS + ", found " + flag);
+        }
+        flags[upto++] = (char) flag;
       }
 
       if (upto < flags.length) {
@@ -1266,6 +1255,12 @@
       }
       return flags;
     }
+
+    @Override
+    void appendFlag(char flag, StringBuilder to) {
+      to.append((int) flag);
+      to.append(",");
+    }
   }
 
   /**
@@ -1300,14 +1295,24 @@
       builder.getChars(0, builder.length(), flags, 0);
       return flags;
     }
+
+    @Override
+    void appendFlag(char flag, StringBuilder to) {
+      to.append((char) (flag >> 8));
+      to.append((char) (flag & 0xff));
+    }
+  }
+
+  boolean hasCompounding() {
+    return compoundRules != null;
   }
 
   boolean hasFlag(int entryId, char flag, BytesRef scratch) {
-    return hasFlag(decodeFlags(entryId, scratch), flag);
+    return flag != FLAG_UNSET && hasFlag(decodeFlags(entryId, scratch), flag);
   }
 
   static boolean hasFlag(char[] flags, char flag) {
-    return Arrays.binarySearch(flags, flag) >= 0;
+    return flag != FLAG_UNSET && Arrays.binarySearch(flags, flag) >= 0;
   }
 
   CharSequence cleanInput(CharSequence input, StringBuilder reuse) {
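
The appendFlag hooks exist because HIDDEN_FLAG was previously appended as a raw char
regardless of the FLAG encoding (see the change from reuse.append(HIDDEN_FLAG) above),
which does not round-trip for "num" and "long" dictionaries. A sketch of what each
strategy now emits for one flag (illustrative wrapper class, not part of the patch):

    class FlagEncodingSketch {
      public static void main(String[] args) {
        char flag = (char) 65511; // HIDDEN_FLAG, aka ONLYUPCASEFLAG
        StringBuilder simple = new StringBuilder();
        simple.append(flag);                      // simple/UTF-8: the char itself
        StringBuilder num = new StringBuilder();
        num.append((int) flag).append(',');       // num: decimal digits plus separator
        StringBuilder lng = new StringBuilder();
        lng.append((char) (flag >> 8));           // long: high byte as one ASCII char,
        lng.append((char) (flag & 0xff));         // then the low byte
        System.out.println(simple + " | " + num + " | " + lng);
      }
    }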
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
index a3e765b..b9f29a3 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/SpellChecker.java
@@ -16,7 +16,10 @@
  */
 package org.apache.lucene.analysis.hunspell;
 
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IntsRef;
 
 /**
  * A spell checker based on Hunspell dictionaries. The objects of this class are not thread-safe
@@ -37,26 +40,104 @@
   public boolean spell(String word) {
     if (word.isEmpty()) return true;
 
-    char[] wordChars = word.toCharArray();
-    if (dictionary.isForbiddenWord(wordChars, scratch)) {
-      return false;
+    if (dictionary.needsInputCleaning) {
+      word = dictionary.cleanInput(word, new StringBuilder()).toString();
     }
 
     if (isNumber(word)) {
       return true;
     }
 
-    if (!stemmer.stem(wordChars, word.length()).isEmpty()) {
+    char[] wordChars = word.toCharArray();
+    if (checkWord(wordChars, wordChars.length, false)) {
       return true;
     }
 
-    if (dictionary.breaks.isNotEmpty() && !hasTooManyBreakOccurrences(word)) {
+    WordCase wc = stemmer.caseOf(wordChars, wordChars.length);
+    if ((wc == WordCase.UPPER || wc == WordCase.TITLE) && checkCaseVariants(wordChars, wc)) {
+      return true;
+    }
+
+    if (dictionary.breaks.isNotEmpty()
+        && !hasTooManyBreakOccurrences(word)
+        && !dictionary.isForbiddenWord(wordChars, word.length(), scratch)) {
       return tryBreaks(word);
     }
 
     return false;
   }
 
+  private boolean checkCaseVariants(char[] wordChars, WordCase wordCase) {
+    char[] caseVariant = wordChars;
+    if (wordCase == WordCase.UPPER) {
+      caseVariant = stemmer.caseFoldTitle(caseVariant, wordChars.length);
+      if (checkWord(caseVariant, wordChars.length, true)) {
+        return true;
+      }
+      char[] aposCase = Stemmer.capitalizeAfterApostrophe(caseVariant, wordChars.length);
+      if (aposCase != null && checkWord(aposCase, aposCase.length, true)) {
+        return true;
+      }
+    }
+    return checkWord(stemmer.caseFoldLower(caseVariant, wordChars.length), wordChars.length, true);
+  }
+
+  private boolean checkWord(char[] wordChars, int length, boolean caseVariant) {
+    if (dictionary.isForbiddenWord(wordChars, length, scratch)) {
+      return false;
+    }
+
+    if (!stemmer.doStem(wordChars, length, caseVariant).isEmpty()) {
+      return true;
+    }
+
+    if (dictionary.hasCompounding()) {
+      return checkCompounds(wordChars, 0, length, new ArrayList<>());
+    }
+
+    return false;
+  }
+
+  private boolean checkCompounds(char[] wordChars, int offset, int length, List<IntsRef> words) {
+    if (words.size() >= 100) return false;
+
+    int limit = length - dictionary.compoundMin + 1;
+    for (int breakPos = dictionary.compoundMin; breakPos < limit; breakPos++) {
+      IntsRef forms = dictionary.lookupWord(wordChars, offset, breakPos);
+      if (forms != null) {
+        words.add(forms);
+
+        if (dictionary.compoundRules != null
+            && dictionary.compoundRules.stream().anyMatch(r -> r.mayMatch(words, scratch))) {
+          if (checkLastCompoundPart(wordChars, offset + breakPos, length - breakPos, words)) {
+            return true;
+          }
+
+          if (checkCompounds(wordChars, offset + breakPos, length - breakPos, words)) {
+            return true;
+          }
+        }
+
+        words.remove(words.size() - 1);
+      }
+    }
+
+    return false;
+  }
+
+  private boolean checkLastCompoundPart(
+      char[] wordChars, int start, int length, List<IntsRef> words) {
+    IntsRef forms = dictionary.lookupWord(wordChars, start, length);
+    if (forms == null) return false;
+
+    words.add(forms);
+    boolean result =
+        dictionary.compoundRules != null
+            && dictionary.compoundRules.stream().anyMatch(r -> r.fullyMatches(words, scratch));
+    words.remove(words.size() - 1);
+    return result;
+  }
+
   private static boolean isNumber(String s) {
     int i = 0;
     while (i < s.length()) {
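
spell() is the public entry point that exercises everything above: input cleaning,
number detection, case variants, compound rules, and break points. A hedged usage
sketch (the Dictionary and SpellChecker constructor signatures are assumed from the
existing Lucene API; they are not shown in this diff):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.lucene.analysis.hunspell.Dictionary;
    import org.apache.lucene.analysis.hunspell.SpellChecker;
    import org.apache.lucene.store.ByteBuffersDirectory;

    class SpellSketch {
      public static void main(String[] args) throws Exception {
        try (InputStream aff = Files.newInputStream(Path.of("en_US.aff"));
            InputStream dic = Files.newInputStream(Path.of("en_US.dic"))) {
          Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "hunspell", aff, dic);
          SpellChecker checker = new SpellChecker(dictionary);
          // UPPER and TITLE inputs now fall back to their case variants, and words
          // assembled from dictionary parts pass when a COMPOUNDRULE pattern matches.
          System.out.println(checker.spell("OPENOFFICE"));
        }
      }
    }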
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
index 1355627..4a337fb 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
@@ -26,7 +26,6 @@
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.apache.lucene.util.fst.FST;
-import org.apache.lucene.util.fst.Outputs;
 
 /**
  * Stemmer uses the affix rules declared in the Dictionary to generate one or more stems for a word.
@@ -53,14 +52,14 @@
    */
   public Stemmer(Dictionary dictionary) {
     this.dictionary = dictionary;
+    prefixReader = dictionary.prefixes == null ? null : dictionary.prefixes.getBytesReader();
+    suffixReader = dictionary.suffixes == null ? null : dictionary.suffixes.getBytesReader();
     for (int level = 0; level < 3; level++) {
       if (dictionary.prefixes != null) {
         prefixArcs[level] = new FST.Arc<>();
-        prefixReaders[level] = dictionary.prefixes.getBytesReader();
       }
       if (dictionary.suffixes != null) {
         suffixArcs[level] = new FST.Arc<>();
-        suffixReaders[level] = dictionary.suffixes.getBytesReader();
       }
     }
     formStep = dictionary.formStep();
@@ -98,6 +97,10 @@
     List<CharsRef> list = doStem(word, length, false);
     if (wordCase == WordCase.UPPER) {
       caseFoldTitle(word, length);
+      char[] aposCase = capitalizeAfterApostrophe(titleBuffer, length);
+      if (aposCase != null) {
+        list.addAll(doStem(aposCase, length, true));
+      }
       list.addAll(doStem(titleBuffer, length, true));
     }
     if (wordCase == WordCase.UPPER || wordCase == WordCase.TITLE) {
@@ -112,8 +115,8 @@
   private char[] titleBuffer = new char[8];
 
   /** returns EXACT_CASE,TITLE_CASE, or UPPER_CASE type for the word */
-  private WordCase caseOf(char[] word, int length) {
-    if (dictionary.ignoreCase || length == 0 || !Character.isUpperCase(word[0])) {
+  WordCase caseOf(char[] word, int length) {
+    if (dictionary.ignoreCase || length == 0 || Character.isLowerCase(word[0])) {
       return WordCase.MIXED;
     }
 
@@ -121,22 +124,41 @@
   }
 
   /** folds titlecase variant of word to titleBuffer */
-  private void caseFoldTitle(char[] word, int length) {
+  char[] caseFoldTitle(char[] word, int length) {
     titleBuffer = ArrayUtil.grow(titleBuffer, length);
     System.arraycopy(word, 0, titleBuffer, 0, length);
     for (int i = 1; i < length; i++) {
       titleBuffer[i] = dictionary.caseFold(titleBuffer[i]);
     }
+    return titleBuffer;
   }
 
   /** folds lowercase variant of word (title cased) to lowerBuffer */
-  private void caseFoldLower(char[] word, int length) {
+  char[] caseFoldLower(char[] word, int length) {
     lowerBuffer = ArrayUtil.grow(lowerBuffer, length);
     System.arraycopy(word, 0, lowerBuffer, 0, length);
     lowerBuffer[0] = dictionary.caseFold(lowerBuffer[0]);
+    return lowerBuffer;
   }
 
-  private List<CharsRef> doStem(char[] word, int length, boolean caseVariant) {
+  // Special prefix handling for Catalan, French, Italian:
+  // prefixes separated by apostrophe (SANT'ELIA -> Sant'+Elia).
+  static char[] capitalizeAfterApostrophe(char[] word, int length) {
+    for (int i = 1; i < length - 1; i++) {
+      if (word[i] == '\'') {
+        char next = word[i + 1];
+        char upper = Character.toUpperCase(next);
+        if (upper != next) {
+          char[] copy = ArrayUtil.copyOfSubArray(word, 0, length);
+          copy[i + 1] = upper;
+          return copy;
+        }
+      }
+    }
+    return null;
+  }
+
+  List<CharsRef> doStem(char[] word, int length, boolean caseVariant) {
     List<CharsRef> stems = new ArrayList<>();
     IntsRef forms = dictionary.lookupWord(word, 0, length);
     if (forms != null) {
@@ -146,20 +168,19 @@
           continue;
         }
         // we can't add this form, it's a pseudostem requiring an affix
-        if (dictionary.needaffix != -1
-            && Dictionary.hasFlag(wordFlags, (char) dictionary.needaffix)) {
+        if (Dictionary.hasFlag(wordFlags, dictionary.needaffix)) {
           continue;
         }
         // we can't add this form, it only belongs inside a compound word
-        if (dictionary.onlyincompound != -1
-            && Dictionary.hasFlag(wordFlags, (char) dictionary.onlyincompound)) {
+        if (Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
           continue;
         }
-        stems.add(newStem(word, length, forms, i));
+        stems.add(newStem(word, 0, length, forms, i));
       }
     }
     try {
-      stems.addAll(stem(word, length, -1, -1, -1, 0, true, true, false, false, caseVariant));
+      stems.addAll(
+          stem(word, 0, length, -1, (char) 0, -1, 0, true, true, false, false, caseVariant));
     } catch (IOException bogus) {
       throw new RuntimeException(bogus);
     }
@@ -168,7 +189,7 @@
 
   private boolean acceptCase(boolean caseVariant, char[] wordFlags) {
     return caseVariant
-        ? dictionary.keepcase == -1 || !Dictionary.hasFlag(wordFlags, (char) dictionary.keepcase)
+        ? !Dictionary.hasFlag(wordFlags, dictionary.keepcase)
         : !Dictionary.hasHiddenFlag(wordFlags);
   }
 
@@ -194,7 +215,7 @@
     return deduped;
   }
 
-  private CharsRef newStem(char[] buffer, int length, IntsRef forms, int formID) {
+  private CharsRef newStem(char[] buffer, int offset, int length, IntsRef forms, int formID) {
     final String exception;
     if (dictionary.hasStemExceptions) {
       int exceptionID = forms.ints[forms.offset + formID + 1];
@@ -212,7 +233,7 @@
       if (exception != null) {
         scratchSegment.append(exception);
       } else {
-        scratchSegment.append(buffer, 0, length);
+        scratchSegment.append(buffer, offset, length);
       }
       try {
         Dictionary.applyMappings(dictionary.oconv, scratchSegment);
@@ -226,19 +247,18 @@
       if (exception != null) {
         return new CharsRef(exception);
       } else {
-        return new CharsRef(buffer, 0, length);
+        return new CharsRef(buffer, offset, length);
       }
     }
   }
 
   // some state for traversing FSTs
-  private final FST.BytesReader[] prefixReaders = new FST.BytesReader[3];
+  private final FST.BytesReader prefixReader;
+  private final FST.BytesReader suffixReader;
 
   @SuppressWarnings({"unchecked", "rawtypes"})
   private final FST.Arc<IntsRef>[] prefixArcs = new FST.Arc[3];
 
-  private final FST.BytesReader[] suffixReaders = new FST.BytesReader[3];
-
   @SuppressWarnings({"unchecked", "rawtypes"})
   private final FST.Arc<IntsRef>[] suffixArcs = new FST.Arc[3];
 
@@ -249,8 +269,8 @@
    * @param previous previous affix that was removed (so we don't remove the same one twice)
    * @param prevFlag Flag from a previous stemming step that need to be cross-checked with any
    *     affixes in this recursive step
-   * @param prefixFlag flag of the most inner removed prefix, so that when removing a suffix, it's
-   *     also checked against the word
+   * @param prefixId ID of the most inner removed prefix, so that when removing a suffix, it's also
+   *     checked against the word
    * @param recursionDepth current recursion depth
    * @param doPrefix true if we should remove prefixes
    * @param doSuffix true if we should remove suffixes
@@ -265,10 +285,11 @@
    */
   private List<CharsRef> stem(
       char[] word,
+      int offset,
       int length,
       int previous,
-      int prevFlag,
-      int prefixFlag,
+      char prevFlag,
+      int prefixId,
       int recursionDepth,
       boolean doPrefix,
       boolean doSuffix,
@@ -282,17 +303,15 @@
 
     if (doPrefix && dictionary.prefixes != null) {
       FST<IntsRef> fst = dictionary.prefixes;
-      Outputs<IntsRef> outputs = fst.outputs;
-      FST.BytesReader bytesReader = prefixReaders[recursionDepth];
       FST.Arc<IntsRef> arc = prefixArcs[recursionDepth];
       fst.getFirstArc(arc);
-      IntsRef NO_OUTPUT = outputs.getNoOutput();
+      IntsRef NO_OUTPUT = fst.outputs.getNoOutput();
       IntsRef output = NO_OUTPUT;
       int limit = dictionary.fullStrip ? length + 1 : length;
       for (int i = 0; i < limit; i++) {
         if (i > 0) {
-          int ch = word[i - 1];
-          if (fst.findTargetArc(ch, arc, arc, bytesReader) == null) {
+          char ch = word[offset + i - 1];
+          if (fst.findTargetArc(ch, arc, arc, prefixReader) == null) {
             break;
           } else if (arc.output() != NO_OUTPUT) {
             output = fst.outputs.add(output, arc.output());
@@ -310,34 +329,23 @@
           }
 
           if (isAffixCompatible(prefix, prevFlag, recursionDepth, false)) {
-            int deAffixedLength = length - i;
-
-            int stripOrd = dictionary.affixData(prefix, Dictionary.AFFIX_STRIP_ORD);
-            int stripStart = dictionary.stripOffsets[stripOrd];
-            int stripEnd = dictionary.stripOffsets[stripOrd + 1];
-            int stripLength = stripEnd - stripStart;
-
-            if (!checkCondition(
-                prefix, dictionary.stripData, stripStart, stripLength, word, i, deAffixedLength)) {
+            char[] strippedWord = stripAffix(word, offset, length, i, prefix, true);
+            if (strippedWord == null) {
               continue;
             }
 
-            char[] strippedWord = new char[stripLength + deAffixedLength];
-            System.arraycopy(dictionary.stripData, stripStart, strippedWord, 0, stripLength);
-            System.arraycopy(word, i, strippedWord, stripLength, deAffixedLength);
-
-            List<CharsRef> stemList =
+            boolean pureAffix = strippedWord == word;
+            stems.addAll(
                 applyAffix(
                     strippedWord,
-                    strippedWord.length,
+                    pureAffix ? offset + i : 0,
+                    pureAffix ? length - i : strippedWord.length,
                     prefix,
                     -1,
                     recursionDepth,
                     true,
                     circumfix,
-                    caseVariant);
-
-            stems.addAll(stemList);
+                    caseVariant));
           }
         }
       }
@@ -345,17 +353,15 @@
 
     if (doSuffix && dictionary.suffixes != null) {
       FST<IntsRef> fst = dictionary.suffixes;
-      Outputs<IntsRef> outputs = fst.outputs;
-      FST.BytesReader bytesReader = suffixReaders[recursionDepth];
       FST.Arc<IntsRef> arc = suffixArcs[recursionDepth];
       fst.getFirstArc(arc);
-      IntsRef NO_OUTPUT = outputs.getNoOutput();
+      IntsRef NO_OUTPUT = fst.outputs.getNoOutput();
       IntsRef output = NO_OUTPUT;
       int limit = dictionary.fullStrip ? 0 : 1;
       for (int i = length; i >= limit; i--) {
         if (i < length) {
-          int ch = word[i];
-          if (fst.findTargetArc(ch, arc, arc, bytesReader) == null) {
+          char ch = word[offset + i];
+          if (fst.findTargetArc(ch, arc, arc, suffixReader) == null) {
             break;
           } else if (arc.output() != NO_OUTPUT) {
             output = fst.outputs.add(output, arc.output());
@@ -373,36 +379,23 @@
           }
 
           if (isAffixCompatible(suffix, prevFlag, recursionDepth, previousWasPrefix)) {
-            int appendLength = length - i;
-            int deAffixedLength = length - appendLength;
-
-            int stripOrd = dictionary.affixData(suffix, Dictionary.AFFIX_STRIP_ORD);
-            int stripStart = dictionary.stripOffsets[stripOrd];
-            int stripEnd = dictionary.stripOffsets[stripOrd + 1];
-            int stripLength = stripEnd - stripStart;
-
-            if (!checkCondition(
-                suffix, word, 0, deAffixedLength, dictionary.stripData, stripStart, stripLength)) {
+            char[] strippedWord = stripAffix(word, offset, length, length - i, suffix, false);
+            if (strippedWord == null) {
               continue;
             }
 
-            char[] strippedWord = new char[stripLength + deAffixedLength];
-            System.arraycopy(word, 0, strippedWord, 0, deAffixedLength);
-            System.arraycopy(
-                dictionary.stripData, stripStart, strippedWord, deAffixedLength, stripLength);
-
-            List<CharsRef> stemList =
+            boolean pureAffix = strippedWord == word;
+            stems.addAll(
                 applyAffix(
                     strippedWord,
-                    strippedWord.length,
+                    pureAffix ? offset : 0,
+                    pureAffix ? i : strippedWord.length,
                     suffix,
-                    prefixFlag,
+                    prefixId,
                     recursionDepth,
                     false,
                     circumfix,
-                    caseVariant);
-
-            stems.addAll(stemList);
+                    caseVariant));
           }
         }
       }
@@ -411,28 +404,57 @@
     return stems;
   }
 
+  /**
+   * @return null if the affix condition isn't met; a reference to the same char[] if the affix
+   *     has no strip data and can thus simply be removed, or a new char[] containing the word
+   *     after affix removal
+   */
+  private char[] stripAffix(
+      char[] word, int offset, int length, int affixLen, int affix, boolean isPrefix) {
+    int deAffixedLen = length - affixLen;
+
+    int stripOrd = dictionary.affixData(affix, Dictionary.AFFIX_STRIP_ORD);
+    int stripStart = dictionary.stripOffsets[stripOrd];
+    int stripEnd = dictionary.stripOffsets[stripOrd + 1];
+    int stripLen = stripEnd - stripStart;
+
+    char[] stripData = dictionary.stripData;
+    boolean condition =
+        isPrefix
+            ? checkCondition(
+                affix, stripData, stripStart, stripLen, word, offset + affixLen, deAffixedLen)
+            : checkCondition(affix, word, offset, deAffixedLen, stripData, stripStart, stripLen);
+    if (!condition) {
+      return null;
+    }
+
+    if (stripLen == 0) return word;
+
+    char[] strippedWord = new char[stripLen + deAffixedLen];
+    System.arraycopy(
+        word,
+        offset + (isPrefix ? affixLen : 0),
+        strippedWord,
+        isPrefix ? stripLen : 0,
+        deAffixedLen);
+    System.arraycopy(stripData, stripStart, strippedWord, isPrefix ? 0 : deAffixedLen, stripLen);
+    return strippedWord;
+  }
+
   private boolean isAffixCompatible(
-      int affix, int prevFlag, int recursionDepth, boolean previousWasPrefix) {
+      int affix, char prevFlag, int recursionDepth, boolean previousWasPrefix) {
     int append = dictionary.affixData(affix, Dictionary.AFFIX_APPEND);
 
     if (recursionDepth == 0) {
-      if (dictionary.onlyincompound == -1) {
-        return true;
-      }
-
       // check if affix is allowed in a non-compound word
-      return !dictionary.hasFlag(append, (char) dictionary.onlyincompound, scratch);
+      return !dictionary.hasFlag(append, dictionary.onlyincompound, scratch);
     }
 
     if (isCrossProduct(affix)) {
       // cross check incoming continuation class (flag of previous affix) against list.
       char[] appendFlags = dictionary.decodeFlags(append, scratch);
-      assert prevFlag >= 0;
-      boolean allowed =
-          dictionary.onlyincompound == -1
-              || !Dictionary.hasFlag(appendFlags, (char) dictionary.onlyincompound);
-      if (allowed) {
-        return previousWasPrefix || Dictionary.hasFlag(appendFlags, (char) prevFlag);
+      if (!Dictionary.hasFlag(appendFlags, dictionary.onlyincompound)) {
+        return previousWasPrefix || Dictionary.hasFlag(appendFlags, prevFlag);
       }
     }
 
@@ -472,7 +494,7 @@
    * @param strippedWord Word after the affix has been removed and the strip added
    * @param length valid length of stripped word
    * @param affix HunspellAffix representing the affix rule itself
-   * @param prefixFlag when we already stripped a prefix, we cant simply recurse and check the
+   * @param prefixId when we already stripped a prefix, we can't simply recurse and check the
    *     suffix, unless both are compatible so we must check dictionary form against both to add it
    *     as a stem!
    * @param recursionDepth current recursion depth
@@ -481,42 +503,40 @@
    */
   private List<CharsRef> applyAffix(
       char[] strippedWord,
+      int offset,
       int length,
       int affix,
-      int prefixFlag,
+      int prefixId,
       int recursionDepth,
       boolean prefix,
       boolean circumfix,
       boolean caseVariant)
       throws IOException {
     char flag = dictionary.affixData(affix, Dictionary.AFFIX_FLAG);
-    char append = dictionary.affixData(affix, Dictionary.AFFIX_APPEND);
 
     List<CharsRef> stems = new ArrayList<>();
 
-    IntsRef forms = dictionary.lookupWord(strippedWord, 0, length);
+    IntsRef forms = dictionary.lookupWord(strippedWord, offset, length);
     if (forms != null) {
       for (int i = 0; i < forms.length; i += formStep) {
         char[] wordFlags = dictionary.decodeFlags(forms.ints[forms.offset + i], scratch);
-        if (Dictionary.hasFlag(wordFlags, flag)) {
+        if (Dictionary.hasFlag(wordFlags, flag) || isFlagAppendedByAffix(prefixId, flag)) {
           // confusing: in this one exception, we already chained the first prefix against the
           // second,
           // so it doesn't need to be checked against the word
           boolean chainedPrefix = dictionary.complexPrefixes && recursionDepth == 1 && prefix;
-          if (!chainedPrefix
-              && prefixFlag >= 0
-              && !Dictionary.hasFlag(wordFlags, (char) prefixFlag)) {
-            // see if we can chain prefix thru the suffix continuation class (only if it has any!)
-            if (!dictionary.hasFlag(append, (char) prefixFlag, scratch)) {
+          if (!chainedPrefix && prefixId >= 0) {
+            char prefixFlag = dictionary.affixData(prefixId, Dictionary.AFFIX_FLAG);
+            if (!Dictionary.hasFlag(wordFlags, prefixFlag)
+                && !isFlagAppendedByAffix(affix, prefixFlag)) {
               continue;
             }
           }
 
           // if circumfix was previously set by a prefix, we must check this suffix,
           // to ensure it has it, and vice versa
-          if (dictionary.circumfix != -1) {
-            boolean suffixCircumfix =
-                dictionary.hasFlag(append, (char) dictionary.circumfix, scratch);
+          if (dictionary.circumfix != Dictionary.FLAG_UNSET) {
+            boolean suffixCircumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
             if (circumfix != suffixCircumfix) {
               continue;
             }
@@ -527,26 +547,25 @@
             continue;
           }
           // we aren't decompounding (yet)
-          if (dictionary.onlyincompound != -1
-              && Dictionary.hasFlag(wordFlags, (char) dictionary.onlyincompound)) {
+          if (Dictionary.hasFlag(wordFlags, dictionary.onlyincompound)) {
             continue;
           }
-          stems.add(newStem(strippedWord, length, forms, i));
+          stems.add(newStem(strippedWord, offset, length, forms, i));
         }
       }
     }
 
     // if a circumfix flag is defined in the dictionary, and we are a prefix, we need to check if we
     // have that flag
-    if (dictionary.circumfix != -1 && !circumfix && prefix) {
-      circumfix = dictionary.hasFlag(append, (char) dictionary.circumfix, scratch);
+    if (dictionary.circumfix != Dictionary.FLAG_UNSET && !circumfix && prefix) {
+      circumfix = isFlagAppendedByAffix(affix, dictionary.circumfix);
     }
 
     if (isCrossProduct(affix) && recursionDepth <= 1) {
       boolean doPrefix;
       if (recursionDepth == 0) {
         if (prefix) {
-          prefixFlag = flag;
+          prefixId = affix;
           doPrefix = dictionary.complexPrefixes && dictionary.twoStageAffix;
           // we took away the first prefix.
           // COMPLEXPREFIXES = true:  combine with a second prefix and another suffix
@@ -562,7 +581,7 @@
       } else {
         doPrefix = false;
         if (prefix && dictionary.complexPrefixes) {
-          prefixFlag = flag;
+          prefixId = affix;
           // we took away the second prefix: go look for another suffix
         } else if (prefix || dictionary.complexPrefixes || !dictionary.twoStageAffix) {
           return stems;
@@ -573,10 +592,11 @@
       stems.addAll(
           stem(
               strippedWord,
+              offset,
               length,
               affix,
               flag,
-              prefixFlag,
+              prefixId,
               recursionDepth + 1,
               doPrefix,
               true,
@@ -588,6 +608,12 @@
     return stems;
   }
 
+  private boolean isFlagAppendedByAffix(int affixId, char flag) {
+    if (affixId < 0 || flag == Dictionary.FLAG_UNSET) return false;
+    int appendId = dictionary.affixData(affixId, Dictionary.AFFIX_APPEND);
+    return dictionary.hasFlag(appendId, flag, scratch);
+  }
+
   private boolean isCrossProduct(int affix) {
     return (dictionary.affixData(affix, Dictionary.AFFIX_CONDITION) & 1) == 1;
   }
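
To make the apostrophe handling concrete: for an all-caps input such as SANT'ELIA,
caseFoldTitle first produces Sant'elia, and capitalizeAfterApostrophe then
re-capitalizes the letter following the apostrophe, so Sant'Elia is also tried as a
case variant (a same-package illustration, since the helper is package-private; not a
test from this patch):

    char[] title = "Sant'elia".toCharArray();
    char[] apos = Stemmer.capitalizeAfterApostrophe(title, title.length);
    assert new String(apos).equals("Sant'Elia");
    // returns null when there is nothing left to capitalize:
    assert Stemmer.capitalizeAfterApostrophe("Sant'Elia".toCharArray(), 9) == null;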
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordCase.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordCase.java
index 7d9e2e7..04adf7a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordCase.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordCase.java
@@ -23,7 +23,7 @@
   MIXED;
 
   static WordCase caseOf(char[] word, int length) {
-    boolean capitalized = Character.isUpperCase(word[0]);
+    boolean startsWithLower = Character.isLowerCase(word[0]);
 
     boolean seenUpper = false;
     boolean seenLower = false;
@@ -34,11 +34,11 @@
       if (seenUpper && seenLower) break;
     }
 
-    return get(capitalized, seenUpper, seenLower);
+    return get(startsWithLower, seenUpper, seenLower);
   }
 
   static WordCase caseOf(CharSequence word, int length) {
-    boolean capitalized = Character.isUpperCase(word.charAt(0));
+    boolean startsWithLower = Character.isLowerCase(word.charAt(0));
 
     boolean seenUpper = false;
     boolean seenLower = false;
@@ -49,11 +49,11 @@
       if (seenUpper && seenLower) break;
     }
 
-    return get(capitalized, seenUpper, seenLower);
+    return get(startsWithLower, seenUpper, seenLower);
   }
 
-  private static WordCase get(boolean capitalized, boolean seenUpper, boolean seenLower) {
-    if (capitalized) {
+  private static WordCase get(boolean startsWithLower, boolean seenUpper, boolean seenLower) {
+    if (!startsWithLower) {
       return !seenLower ? UPPER : !seenUpper ? TITLE : MIXED;
     }
     return seenUpper ? MIXED : LOWER;
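
The switch from Character.isUpperCase to Character.isLowerCase on the first character
means that words starting with a non-letter (a digit, an apostrophe) now take the
capitalized branch instead of defaulting toward LOWER. Expected classifications under
the new predicate (a same-package illustration, since caseOf is package-private; not a
test from this patch):

    assert WordCase.caseOf("lucene", 6) == WordCase.LOWER; // starts lower, no upper seen
    assert WordCase.caseOf("iPhone", 6) == WordCase.MIXED; // starts lower, upper seen
    assert WordCase.caseOf("Paris", 5) == WordCase.TITLE;  // upper start, rest lower
    assert WordCase.caseOf("NASA", 4) == WordCase.UPPER;   // no lower at all
    assert WordCase.caseOf("2nd", 3) == WordCase.TITLE;    // digit start: no longer LOWER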
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilter.java
new file mode 100644
index 0000000..c622bde
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilter.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.pattern;
+
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+
+/**
+ * Sets a type attribute to a parameterized value when tokens are matched by any of several regex
+ * patterns. The value set in the type attribute is parameterized with the match groups of the
+ * regex used for matching. In combination with a TypeAsSynonymFilter and a DropIfFlagged filter,
+ * this can supply complex synonym patterns that are protected from subsequent analysis, and
+ * optionally drop the original term based on the flag set in this filter. See
+ * {@link PatternTypingFilterFactory} for full documentation.
+ *
+ * @see PatternTypingFilterFactory
+ * @since 8.8.0
+ */
+public class PatternTypingFilter extends TokenFilter {
+
+  private final PatternTypingRule[] replacementAndFlagByPattern;
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final FlagsAttribute flagAtt = addAttribute(FlagsAttribute.class);
+  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+
+  public PatternTypingFilter(TokenStream input, PatternTypingRule... replacementAndFlagByPattern) {
+    super(input);
+    this.replacementAndFlagByPattern = replacementAndFlagByPattern;
+  }
+
+  @Override
+  public final boolean incrementToken() throws IOException {
+    if (input.incrementToken()) {
+      for (PatternTypingRule rule : replacementAndFlagByPattern) {
+        Matcher matcher = rule.getPattern().matcher(termAtt);
+        if (matcher.find()) {
+          // replaceFirst performs a second reset() and find() internally; we allow that
+          // to avoid excess string creation
+          typeAtt.setType(matcher.replaceFirst(rule.getTypeTemplate()));
+          flagAtt.setFlags(rule.getFlags());
+          return true;
+        }
+      }
+      return true;
+    }
+    return false;
+  }
+
+  /** Value holding class for pattern typing rules. */
+  public static class PatternTypingRule {
+    private final Pattern pattern;
+    private final int flags;
+    private final String typeTemplate;
+
+    public PatternTypingRule(Pattern pattern, int flags, String typeTemplate) {
+      this.pattern = pattern;
+      this.flags = flags;
+      this.typeTemplate = typeTemplate;
+    }
+
+    public Pattern getPattern() {
+      return pattern;
+    }
+
+    public int getFlags() {
+      return flags;
+    }
+
+    public String getTypeTemplate() {
+      return typeTemplate;
+    }
+  }
+}
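
A hedged usage sketch of the new filter (the tokenizer assembly is illustrative; only
PatternTypingFilter and PatternTypingRule come from this patch):

    import java.io.StringReader;
    import java.util.regex.Pattern;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.pattern.PatternTypingFilter;
    import org.apache.lucene.analysis.pattern.PatternTypingFilter.PatternTypingRule;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

    class PatternTypingSketch {
      public static void main(String[] args) throws Exception {
        Tokenizer tok = new WhitespaceTokenizer();
        tok.setReader(new StringReader("401(k) plan"));
        // flags value 3 sets the two lowest bits; the type template is parameterized
        // with the regex match groups
        TokenStream ts = new PatternTypingFilter(tok,
            new PatternTypingRule(Pattern.compile("(\\d+)\\(?([a-z])\\)?"), 3, "legal2_$1_$2"));
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        TypeAttribute type = ts.getAttribute(TypeAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term + " -> " + type.type()); // 401(k) -> legal2_401_k, plan -> word
        }
        ts.end();
        ts.close();
      }
    }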
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java
new file mode 100644
index 0000000..4d4539e
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTypingFilterFactory.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.pattern;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+import org.apache.lucene.analysis.TokenFilterFactory;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.pattern.PatternTypingFilter.PatternTypingRule;
+import org.apache.lucene.util.ResourceLoader;
+import org.apache.lucene.util.ResourceLoaderAware;
+
+/**
+ * Provides a factory for {@link PatternTypingFilter}, which sets a type (and optionally flags) on
+ * tokens matching regular expressions loaded from a patterns file. By itself this filter is not
+ * very useful. Normally it is combined with a filter that reacts to the types or flags it sets.
+ *
+ * <pre class="prettyprint">
+ * &lt;fieldType name="text_taf" class="solr.TextField" positionIncrementGap="100"&gt;
+ *   &lt;analyzer&gt;
+ *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
+ *     &lt;filter class="com.example.PatternTypingFilter" patternFile="patterns.txt"/&gt;
+ *     &lt;filter class="solr.TokenAnalyzerFilter" asType="text_en" preserveType="true"/&gt;
+ *     &lt;filter class="solr.TypeAsSynonymFilterFactory" prefix="__TAS__"
+ *               ignore="word,&amp;lt;ALPHANUM&amp;gt;,&amp;lt;NUM&amp;gt;,&amp;lt;SOUTHEAST_ASIAN&amp;gt;,&amp;lt;IDEOGRAPHIC&amp;gt;,&amp;lt;HIRAGANA&amp;gt;,&amp;lt;KATAKANA&amp;gt;,&amp;lt;HANGUL&amp;gt;,&amp;lt;EMOJI&amp;gt;"/&gt;
+ *   &lt;/analyzer&gt;
+ * &lt;/fieldType&gt;</pre>
+ *
+ * <p>Note that a configuration such as the above may interfere with multi-word synonyms. The
+ * patterns file has the format:
+ *
+ * <pre>
+ * (flags) (pattern) ::: (replacement)
+ * </pre>
+ *
+ * Therefore, to set the first two flag bits on an original token matching 401k or 401(k), and to
+ * add a type of 'legal2_401_k' whenever either form is encountered, one would use:
+ *
+ * <pre>
+ * 3 (\d+)\(?([a-z])\)? ::: legal2_$1_$2
+ * </pre>
+ *
+ * Note that the number indicating the flag bits to set must have no leading spaces, must be
+ * followed by a single space, and must be 0 if no flags should be set. The flags number must not
+ * contain commas or a decimal point. Lines whose first character is <code>#</code> are ignored as
+ * comments. This filter does not support producing a synonym textually identical to the original term.
+ *
+ * @lucene.spi {@value #NAME}
+ * @since 8.8
+ */
+public class PatternTypingFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
+
+  /** SPI name */
+  public static final String NAME = "patternTyping";
+
+  private final String patternFile;
+  private PatternTypingRule[] rules;
+
+  /** Creates a new PatternTypingFilterFactory */
+  public PatternTypingFilterFactory(Map<String, String> args) {
+    super(args);
+    patternFile = require(args, "patternFile");
+    if (!args.isEmpty()) {
+      throw new IllegalArgumentException("Unknown parameters: " + args);
+    }
+  }
+
+  /** Default ctor for compatibility with SPI */
+  public PatternTypingFilterFactory() {
+    throw defaultCtorException();
+  }
+
+  @Override
+  public void inform(ResourceLoader loader) throws IOException {
+    List<PatternTypingRule> ruleList = new ArrayList<>();
+    List<String> lines = getLines(loader, patternFile);
+    // format: flags regex ::: typename[_$1[_$2 ...]]    (technically _$1 does not need the '_' but
+    // it usually makes sense)
+    // eg: 2 (\d+)\(?([a-z])\)?\(?(\d+)\)? ::: legal3_$1_$2_$3
+    // which yields legal3_501_c_3 for 501(c)(3) or 501c3 and sets the second lowest bit in flags
+    for (String line : lines) {
+      int firstSpace = line.indexOf(" "); // no leading spaces allowed
+      int flagsVal = Integer.parseInt(line.substring(0, firstSpace));
+      line = line.substring(firstSpace + 1);
+      String[] split =
+          line.split(" ::: "); // arbitrary separator, unlikely to occur in a useful regex, easy to read
+      if (split.length != 2) {
+        throw new RuntimeException(
+            "The PatternTypingFilter: Always two there are, no more, no less, a pattern and a replacement (separated by ' ::: ' )");
+      }
+      Pattern compiled = Pattern.compile(split[0]);
+      ruleList.add(new PatternTypingRule(compiled, flagsVal, split[1]));
+    }
+    this.rules = ruleList.toArray(new PatternTypingRule[0]);
+  }
+
+  @Override
+  public TokenStream create(TokenStream input) {
+    return new PatternTypingFilter(input, rules);
+  }
+}
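A sketch of what inform() builds from a patterns file: the file line shown in the comment is assumed, and the hand-built rule below is its equivalent (the class name is ours, for illustration only):

    import java.util.regex.Pattern;
    import org.apache.lucene.analysis.pattern.PatternTypingFilter.PatternTypingRule;

    public class PatternFileSketch {
      public static void main(String[] args) {
        // a patternFile line:  3 (\d+)\(?([a-z])\)? ::: legal2_$1_$2
        // is parsed by inform() into the equivalent of:
        PatternTypingRule rule =
            new PatternTypingRule(Pattern.compile("(\\d+)\\(?([a-z])\\)?"), 3, "legal2_$1_$2");
        System.out.println(
            rule.getPattern() + " flags=" + rule.getFlags() + " type=" + rule.getTypeTemplate());
      }
    }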
diff --git a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.TokenFilterFactory b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.TokenFilterFactory
index be82bf2..ce2fd64 100644
--- a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.TokenFilterFactory
+++ b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.TokenFilterFactory
@@ -97,6 +97,7 @@
 org.apache.lucene.analysis.no.NorwegianMinimalStemFilterFactory
 org.apache.lucene.analysis.pattern.PatternReplaceFilterFactory
 org.apache.lucene.analysis.pattern.PatternCaptureGroupFilterFactory
+org.apache.lucene.analysis.pattern.PatternTypingFilterFactory
 org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory
 org.apache.lucene.analysis.payloads.NumericPayloadTokenFilterFactory
 org.apache.lucene.analysis.payloads.TokenOffsetPayloadTokenFilterFactory
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/TestDelimitedBoostTokenFilter.java
similarity index 98%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/TestDelimitedBoostTokenFilter.java
index d5608ac..17b5c15 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/DelimitedBoostTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/boost/TestDelimitedBoostTokenFilter.java
@@ -21,7 +21,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.search.BoostAttribute;
 
-public class DelimitedBoostTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedBoostTokenFilter extends BaseTokenStreamTestCase {
 
   public void testBoosts() throws Exception {
     String test = "The quick|0.4 red|0.5 fox|0.2 jumped|0.1 over the lazy|0.8 brown|0.9 dogs|0.9";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
index 8c3ad79..76aa70b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.util.TestUtil;
 
-public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
+public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {
 
   private static Analyzer newTestAnalyzer() {
     return new Analyzer() {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilter.java
index ee44a49..2ce0d3a 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilter.java
@@ -28,7 +28,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 
 /** Tests CommonGrams(Query)Filter */
-public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
+public class TestCommonGramsFilter extends BaseTokenStreamTestCase {
   private static final CharArraySet commonWords =
       new CharArraySet(Arrays.asList("s", "a", "b", "c", "d", "the", "of"), false);
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index 4fc2a65..512ae51 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -85,6 +85,7 @@
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
 import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
+import org.apache.lucene.analysis.pattern.PatternTypingFilter;
 import org.apache.lucene.analysis.payloads.IdentityEncoder;
 import org.apache.lucene.analysis.payloads.PayloadEncoder;
 import org.apache.lucene.analysis.shingle.FixedShingleFilter;
@@ -617,6 +618,23 @@
                     new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE).toAutomaton(),
                     Operations.DEFAULT_MAX_DETERMINIZED_STATES);
               });
+          put(
+              PatternTypingFilter.PatternTypingRule[].class,
+              random -> {
+                int numRules = TestUtil.nextInt(random, 1, 3);
+                PatternTypingFilter.PatternTypingRule[] patternTypingRules =
+                    new PatternTypingFilter.PatternTypingRule[numRules];
+                for (int i = 0; i < patternTypingRules.length; i++) {
+                  String s = TestUtil.randomSimpleString(random, 1, 2);
+                  // random regex with one group
+                  String regex = s + "(.*)";
+                  // pattern rule with a template that accepts one group.
+                  patternTypingRules[i] =
+                      new PatternTypingFilter.PatternTypingRule(
+                          Pattern.compile(regex), TestUtil.nextInt(random, 1, 8), s + "_$1");
+                }
+                return patternTypingRules;
+              });
         }
       };
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekAnalyzer.java
similarity index 97%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekAnalyzer.java
index 7555b87..30d2965 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekAnalyzer.java
@@ -20,7 +20,7 @@
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
 /** A unit test class for verifying the correct operation of the GreekAnalyzer. */
-public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
+public class TestGreekAnalyzer extends BaseTokenStreamTestCase {
 
   /**
    * Test the analysis of various greek strings.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
index a478dda..dacf22e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/SpellCheckerTest.java
@@ -27,6 +27,11 @@
 
 public class SpellCheckerTest extends StemmerTestBase {
   @Test
+  public void allcaps() throws Exception {
+    doTest("allcaps");
+  }
+
+  @Test
   public void i53643_numbersWithSeparators() throws Exception {
     doTest("i53643");
   }
@@ -43,6 +48,38 @@
     doTest("breakoff");
   }
 
+  public void testCompoundrule() throws Exception {
+    doTest("compoundrule");
+  }
+
+  public void testCompoundrule2() throws Exception {
+    doTest("compoundrule2");
+  }
+
+  public void testCompoundrule3() throws Exception {
+    doTest("compoundrule3");
+  }
+
+  public void testCompoundrule4() throws Exception {
+    doTest("compoundrule4");
+  }
+
+  public void testCompoundrule5() throws Exception {
+    doTest("compoundrule5");
+  }
+
+  public void testCompoundrule6() throws Exception {
+    doTest("compoundrule6");
+  }
+
+  public void testCompoundrule7() throws Exception {
+    doTest("compoundrule7");
+  }
+
+  public void testCompoundrule8() throws Exception {
+    doTest("compoundrule8");
+  }
+
   protected void doTest(String name) throws Exception {
     InputStream affixStream =
         Objects.requireNonNull(getClass().getResourceAsStream(name + ".aff"), name);
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllCaps.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllCaps.java
index 43c6764..33f132f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllCaps.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllCaps.java
@@ -28,6 +28,8 @@
   public void testGood() {
     assertStemsTo("OpenOffice.org", "OpenOffice.org");
     assertStemsTo("UNICEF's", "UNICEF");
+    assertStemsTo("L'Afrique", "Afrique");
+    assertStemsTo("L'AFRIQUE", "Afrique");
 
     // Hunspell returns these title-cased stems, so for consistency we do, too
     assertStemsTo("OPENOFFICE.ORG", "Openoffice.org");
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDependencies.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDependencies.java
index aadcda3..e6310b6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDependencies.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDependencies.java
@@ -38,5 +38,8 @@
     assertStemsTo("hydration", "hydrate");
     assertStemsTo("dehydrate", "hydrate");
     assertStemsTo("dehydration", "hydrate");
+
+    assertStemsTo("calorie", "calorie", "calorie");
+    assertStemsTo("calories", "calorie");
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
index 5e8fdff..a0ece78 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
@@ -22,6 +22,7 @@
 import java.io.InputStream;
 import java.nio.charset.StandardCharsets;
 import java.text.ParseException;
+import java.util.Random;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -33,6 +34,7 @@
 import org.apache.lucene.util.fst.FSTCompiler;
 import org.apache.lucene.util.fst.Outputs;
 import org.apache.lucene.util.fst.Util;
+import org.junit.Test;
 
 public class TestDictionary extends LuceneTestCase {
 
@@ -268,6 +270,27 @@
     assertNotNull(Dictionary.getFlagParsingStrategy("FLAG    UTF-8"));
   }
 
+  @Test
+  public void testFlagSerialization() {
+    Random r = random();
+    char[] flags = new char[r.nextInt(10)];
+    for (int i = 0; i < flags.length; i++) {
+      flags[i] = (char) r.nextInt(Character.MAX_VALUE);
+    }
+
+    String[] flagLines = {"FLAG long", "FLAG UTF-8", "FLAG num"};
+    for (String flagLine : flagLines) {
+      Dictionary.FlagParsingStrategy strategy = Dictionary.getFlagParsingStrategy(flagLine);
+      StringBuilder serialized = new StringBuilder();
+      for (char flag : flags) {
+        strategy.appendFlag(flag, serialized);
+      }
+
+      char[] deserialized = strategy.parseFlags(serialized.toString());
+      assertEquals(new String(flags), new String(deserialized));
+    }
+  }
+
   private Directory getDirectory() {
     return newDirectory();
   }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.aff
index 57e916b..53efbaf 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.aff
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.aff
@@ -3,3 +3,6 @@
 
 SFX S N 1
 SFX S   0     's      .
+
+PFX L N 1
+PFX L   0     L'
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.dic
index 7d3cdcc..4732b95 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.dic
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.dic
@@ -1,3 +1,4 @@
 2
 OpenOffice.org
 UNICEF/S
+Afrique/L
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.good
new file mode 100644
index 0000000..fb88cc2
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.good
@@ -0,0 +1,6 @@
+OpenOffice.org
+OPENOFFICE.ORG
+UNICEF's
+UNICEF'S
+L'AFRIQUE
+L'Afrique
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.wrong
new file mode 100644
index 0000000..6681949
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/allcaps.wrong
@@ -0,0 +1,3 @@
+Openoffice.org
+Unicef
+Unicef's
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.aff
new file mode 100644
index 0000000..09309e0
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.aff
@@ -0,0 +1,3 @@
+COMPOUNDMIN 1
+COMPOUNDRULE 1
+COMPOUNDRULE ABC
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.dic
new file mode 100644
index 0000000..b11e829
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.dic
@@ -0,0 +1,5 @@
+3
+a/A
+b/B
+c/BC
+
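To make the new COMPOUNDRULE fixtures easier to review: a rule is, in effect, a regular expression over the flags of the words joined into a compound (standard Hunspell semantics). A self-contained illustration of that idea, not the Lucene implementation; class and method names are ours:

    import java.util.Map;
    import java.util.regex.Pattern;

    public class CompoundRuleSketch {
      // flags from compoundrule.dic: a/A, b/B, c/BC
      static final Map<Character, String> FLAGS = Map.of('a', "A", 'b', "B", 'c', "BC");
      static final Pattern RULE = Pattern.compile("ABC"); // COMPOUNDRULE ABC

      public static void main(String[] args) {
        // every dictionary word here is a single letter, so each character is one compound part
        for (String word : new String[] {"abc", "acc", "ab", "ba"}) {
          // abc, acc -> true (compoundrule.good); ab, ba -> false (compoundrule.wrong)
          System.out.println(word + " -> " + accepts(word, 0, new StringBuilder()));
        }
      }

      // accepted if some choice of one flag per part makes the flag string match the rule
      static boolean accepts(String word, int i, StringBuilder chosen) {
        if (i == word.length()) {
          return RULE.matcher(chosen).matches();
        }
        for (char flag : FLAGS.getOrDefault(word.charAt(i), "").toCharArray()) {
          chosen.append(flag);
          if (accepts(word, i + 1, chosen)) {
            return true;
          }
          chosen.deleteCharAt(chosen.length() - 1);
        }
        return false;
      }
    }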
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.good
new file mode 100644
index 0000000..c7a0763
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.good
@@ -0,0 +1,2 @@
+abc
+acc
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.wrong
new file mode 100644
index 0000000..bc151ea
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule.wrong
@@ -0,0 +1,39 @@
+ba
+aaabaaa
+bbaaa
+aaaaba
+bbbbbaa
+aa
+aaa
+aaaa
+ab
+aab
+aaab
+aaaab
+abb
+aabb
+aaabbb
+bb
+bbb
+bbbb
+aaab
+abcc
+abbc
+abbcc
+aabc
+aabcc
+aabbc
+aabbcc
+aaabbbccc
+ac
+aac
+aacc
+aaaccc
+bc
+bcc
+bbc
+bbcc
+bbbccc
+cc
+ccc
+cccccc
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.aff
new file mode 100644
index 0000000..e4b86a5
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.aff
@@ -0,0 +1,3 @@
+COMPOUNDMIN 1
+COMPOUNDRULE 1
+COMPOUNDRULE A*B*C*
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.dic
new file mode 100644
index 0000000..7d07bbc
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.dic
@@ -0,0 +1,5 @@
+3
+a/A
+b/B
+c/C
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.good
new file mode 100644
index 0000000..de743bb
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.good
@@ -0,0 +1,37 @@
+aa
+aaa
+aaaa
+ab
+aab
+aaab
+aaaab
+abb
+aabb
+aaabbb
+bb
+bbb
+bbbb
+aaab
+abc
+abcc
+abbc
+abbcc
+aabc
+aabcc
+aabbc
+aabbcc
+aaabbbccc
+ac
+acc
+aac
+aacc
+aaaccc
+bc
+bcc
+bbc
+bbcc
+bbbccc
+cc
+ccc
+cccccc
+abcc
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.wrong
new file mode 100644
index 0000000..9e5d38d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule2.wrong
@@ -0,0 +1,8 @@
+ba
+aaabaaa
+bbaaa
+aaaaba
+bbbbbaa
+cba
+cab
+acb
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.aff
new file mode 100644
index 0000000..0053145
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.aff
@@ -0,0 +1,3 @@
+COMPOUNDMIN 1
+COMPOUNDRULE 1
+COMPOUNDRULE A?B?C?
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.dic
new file mode 100644
index 0000000..7d07bbc
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.dic
@@ -0,0 +1,5 @@
+3
+a/A
+b/B
+c/C
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.good
new file mode 100644
index 0000000..7f51889
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.good
@@ -0,0 +1,7 @@
+a
+b
+c
+ab
+abc
+ac
+bc
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.wrong
new file mode 100644
index 0000000..6bd1d80
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule3.wrong
@@ -0,0 +1,41 @@
+aa
+aaa
+aaaa
+aab
+aaab
+aaaab
+abb
+aabb
+aaabbb
+bb
+bbb
+bbbb
+aaab
+abcc
+abbc
+abbcc
+aabc
+aabcc
+aabbc
+aabbcc
+aaabbbccc
+acc
+aac
+aacc
+aaaccc
+bcc
+bbc
+bbcc
+bbbccc
+cc
+ccc
+cccccc
+abcc
+ba
+aaabaaa
+bbaaa
+aaaaba
+bbbbbaa
+cba
+cab
+acb
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.aff
new file mode 100644
index 0000000..8a9996c
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.aff
@@ -0,0 +1,7 @@
+# English ordinal numbers
+WORDCHARS 0123456789
+COMPOUNDMIN 1
+ONLYINCOMPOUND c
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.dic
new file mode 100644
index 0000000..ced0735
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.dic
@@ -0,0 +1,24 @@
+22
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.good
new file mode 100644
index 0000000..8694943
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.good
@@ -0,0 +1,31 @@
+1st
+2nd
+3rd
+4th
+5th
+6th
+7th
+8th
+9th
+10th
+11th
+12th
+13th
+14th
+15th
+16th
+17th
+18th
+19th
+20th
+21st
+22nd
+23rd
+24th
+25th
+100th
+1000th
+10001st
+10011th
+1ST
+42ND
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.wrong
new file mode 100644
index 0000000..99f28e7
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule4.wrong
@@ -0,0 +1,5 @@
+1th
+2th
+3th
+10001th
+10011st
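A quick trace of these ordinal fixtures against compoundrule4.aff: for 21st the parts are 2/nm and 1st/p, and choosing m for 2 yields the flag string mp, which matches COMPOUNDRULE n*mp, so 21st is good. For 10011st the final parts are 1/n1 and 1st/p; n*1t needs a trailing t and n*mp needs an m just before the p, and neither part carries those flags, so 10011st is wrong.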
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.aff
new file mode 100644
index 0000000..4650246
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.aff
@@ -0,0 +1,7 @@
+# number + percent
+SET UTF-8
+COMPOUNDMIN 1
+COMPOUNDRULE 2
+COMPOUNDRULE N*%?
+COMPOUNDRULE NN*.NN*%?
+WORDCHARS 0123456789‰.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.dic
new file mode 100644
index 0000000..eeeffda
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.dic
@@ -0,0 +1,14 @@
+13
+0/N	po:num
+1/N	po:num
+2/N	po:num
+3/N	po:num
+4/N	po:num
+5/N	po:num
+6/N	po:num
+7/N	po:num
+8/N	po:num
+9/N	po:num
+./.	po:sign_dot
+%/%	po:sign_percent
+‰/%	po:sign_per_mille
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.good
new file mode 100644
index 0000000..691fca1
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.good
@@ -0,0 +1,7 @@
+10%
+0.2%
+0.20%
+123.4561‰
+10
+0000
+10.25
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.wrong
new file mode 100644
index 0000000..ba1fe32
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule5.wrong
@@ -0,0 +1 @@
+.25
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.aff
new file mode 100644
index 0000000..e8a088d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.aff
@@ -0,0 +1,4 @@
+COMPOUNDMIN 1
+COMPOUNDRULE 2
+COMPOUNDRULE A*A
+COMPOUNDRULE A*AAB*BBBC*C
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.dic
new file mode 100644
index 0000000..7d07bbc
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.dic
@@ -0,0 +1,5 @@
+3
+a/A
+b/B
+c/C
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.good
new file mode 100644
index 0000000..55a8f8b
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.good
@@ -0,0 +1,4 @@
+aa
+aaaaaa
+aabbbc
+aaaaabbbbbbcccccc
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.wrong
new file mode 100644
index 0000000..48b376d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule6.wrong
@@ -0,0 +1,4 @@
+abc
+abbbbbccccccc
+aabbccccccc
+aabbbbbbb
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.aff
new file mode 100644
index 0000000..3ae1fc78
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.aff
@@ -0,0 +1,8 @@
+# English ordinal numbers (parenthesized long flags)
+FLAG long
+WORDCHARS 0123456789
+COMPOUNDMIN 1
+ONLYINCOMPOUND cc
+COMPOUNDRULE 2
+COMPOUNDRULE (nn)*(11)(tt)
+COMPOUNDRULE (nn)*(mm)(pp)
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.dic
new file mode 100644
index 0000000..ad4bb4d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.dic
@@ -0,0 +1,24 @@
+22
+0/nnmm
+1/nn11
+2/nnmm
+3/nnmm
+4/nnmm
+5/nnmm
+6/nnmm
+7/nnmm
+8/nnmm
+9/nnmm
+0th/pptt
+1st/pp
+1th/ttcc
+2nd/pp
+2th/ttcc
+3rd/pp
+3th/ttcc
+4th/pptt
+5th/pptt
+6th/pptt
+7th/pptt
+8th/pptt
+9th/pptt
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.good
new file mode 100644
index 0000000..fafe64a
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.good
@@ -0,0 +1,29 @@
+1st
+2nd
+3rd
+4th
+5th
+6th
+7th
+8th
+9th
+10th
+11th
+12th
+13th
+14th
+15th
+16th
+17th
+18th
+19th
+20th
+21st
+22nd
+23rd
+24th
+25th
+100th
+1000th
+10001st
+10011th
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.wrong
new file mode 100644
index 0000000..99f28e7
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule7.wrong
@@ -0,0 +1,5 @@
+1th
+2th
+3th
+10001th
+10011st
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.aff
new file mode 100644
index 0000000..03a423d
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.aff
@@ -0,0 +1,8 @@
+# English ordinal numbers (parenthesized numerical flags)
+FLAG num
+WORDCHARS 0123456789
+COMPOUNDMIN 1
+ONLYINCOMPOUND 1000
+COMPOUNDRULE 2
+COMPOUNDRULE (1001)*(1002)(2001)
+COMPOUNDRULE (1001)*(2002)(2000)
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.dic
new file mode 100644
index 0000000..e156e95
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.dic
@@ -0,0 +1,24 @@
+22
+0/1001,2002
+1/1001,1002
+2/1001,2002
+3/1001,2002
+4/1001,2002
+5/1001,2002
+6/1001,2002
+7/1001,2002
+8/1001,2002
+9/1001,2002
+0th/2000,2001
+1st/2000
+1th/2001,1000
+2nd/2000
+2th/2001,1000
+3rd/2000
+3th/2001,1000
+4th/2000,2001
+5th/2000,2001
+6th/2000,2001
+7th/2000,2001
+8th/2000,2001
+9th/2000,2001
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.good b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.good
new file mode 100644
index 0000000..fafe64a
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.good
@@ -0,0 +1,29 @@
+1st
+2nd
+3rd
+4th
+5th
+6th
+7th
+8th
+9th
+10th
+11th
+12th
+13th
+14th
+15th
+16th
+17th
+18th
+19th
+20th
+21st
+22nd
+23rd
+24th
+25th
+100th
+1000th
+10001st
+10011th
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.wrong b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.wrong
new file mode 100644
index 0000000..99f28e7
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/compoundrule8.wrong
@@ -0,0 +1,5 @@
+1th
+2th
+3th
+10001th
+10011st
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/conv.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/conv.aff
index 2705dc5..cee48e7 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/conv.aff
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/conv.aff
@@ -6,9 +6,10 @@
 ICONV C c
 ICONV I i
 
-OCONV 4
-OCONV a A
-OCONV b B
+# Testing also whitespace and comments.
+OCONV 4 # space, space
+OCONV	a A # tab, space, space
+OCONV	b	B # tab, tab, space
 OCONV c C
 OCONV i I
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.aff b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.aff
index 6aff674..9750c06 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.aff
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.aff
@@ -17,3 +17,10 @@
 
 SFX A Y 1
 SFX A te tion/S .
+
+SFX s Y 1
+SFX s 0 s .
+
+PFX p Y 1
+PFX p 0 0/s .
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.dic
index 632f70f..08c565e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.dic
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/dependencies.dic
@@ -1,4 +1,5 @@
-2
+4
 drink/RQ	[verb]
 drink/S	[noun]
-hydrate/hA
\ No newline at end of file
+hydrate/hA
+calorie/p
\ No newline at end of file
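The new calorie entries exercise continuation flags carried by affixes themselves: PFX p is a zero-length prefix whose output carries /s, so calorie/p stems to calorie both directly and via the empty prefix, and the prefix's s flag then licenses the s suffix, yielding calories. This is what the new assertions in TestDependencies above check.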
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/MinHashFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/TestMinHashFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/MinHashFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/TestMinHashFilter.java
index 46703f8..77a2ebe 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/MinHashFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/minhash/TestMinHashFilter.java
@@ -35,7 +35,7 @@
 import org.junit.Test;
 
 /** Tests for {@link MinHashFilter} */
-public class MinHashFilterTest extends BaseTokenStreamTestCase {
+public class TestMinHashFilter extends BaseTokenStreamTestCase {
 
   @Test
   public void testIntHash() {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilter.java
similarity index 95%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilter.java
index 273980f..5c5afec 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilter.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.TokenStream;
 
-public class DateRecognizerFilterTest extends BaseTokenStreamTestCase {
+public class TestDateRecognizerFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
     final String test =
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactoryTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilterFactory.java
similarity index 95%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactoryTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilterFactory.java
index 803ec6a..28831b0 100755
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DateRecognizerFilterFactoryTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDateRecognizerFilterFactory.java
@@ -20,7 +20,7 @@
 import java.util.Map;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 
-public class DateRecognizerFilterFactoryTest extends BaseTokenStreamTestCase {
+public class TestDateRecognizerFilterFactory extends BaseTokenStreamTestCase {
 
   public void testBadLanguageTagThrowsException() {
     expectThrows(
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDelimitedTermFrequencyTokenFilter.java
similarity index 97%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDelimitedTermFrequencyTokenFilter.java
index 6b9acb8..3c84f82 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestDelimitedTermFrequencyTokenFilter.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute;
 
-public class DelimitedTermFrequencyTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedTermFrequencyTokenFilter extends BaseTokenStreamTestCase {
 
   public void testTermFrequency() throws Exception {
     String test = "The quick|40 red|4 fox|06 jumped|1 over the lazy|2 brown|123 dogs|1024";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenFilter.java
index 3f39c2d..808c473 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenFilter.java
@@ -35,7 +35,7 @@
 import org.apache.lucene.util.TestUtil;
 
 /** Tests {@link EdgeNGramTokenFilter} for correctness. */
-public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestEdgeNGramTokenFilter extends BaseTokenStreamTestCase {
   private TokenStream input;
 
   @Override
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenizer.java
similarity index 97%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenizer.java
index c94e51a..78846c3 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestEdgeNGramTokenizer.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.util.TestUtil;
 
 /** Tests {@link EdgeNGramTokenizer} for correctness. */
-public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
+public class TestEdgeNGramTokenizer extends BaseTokenStreamTestCase {
   private StringReader input;
 
   @Override
@@ -146,7 +146,7 @@
 
   private static void testNGrams(int minGram, int maxGram, String s, String nonTokenChars)
       throws IOException {
-    NGramTokenizerTest.testNGrams(minGram, maxGram, s, nonTokenChars, true);
+    TestNGramTokenizer.testNGrams(minGram, maxGram, s, nonTokenChars, true);
   }
 
   public void testLargeInput() throws IOException {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenFilter.java
index bc27e71..f5f6967 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenFilter.java
@@ -34,7 +34,7 @@
 import org.apache.lucene.util.TestUtil;
 
 /** Tests {@link NGramTokenFilter} for correctness. */
-public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestNGramTokenFilter extends BaseTokenStreamTestCase {
   private TokenStream input;
 
   @Override
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
similarity index 98%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
index f0b1fe0..f0b1066 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.util.TestUtil;
 
 /** Tests {@link NGramTokenizer} for correctness. */
-public class NGramTokenizerTest extends BaseTokenStreamTestCase {
+public class TestNGramTokenizer extends BaseTokenStreamTestCase {
   private StringReader input;
 
   @Override
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilter.java
new file mode 100644
index 0000000..2611ff2
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilter.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.pattern;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.pattern.PatternTypingFilter.PatternTypingRule;
+
+/** Test that this filter sets a type for tokens matching the configured patterns */
+public class TestPatternTypingFilter extends BaseTokenStreamTestCase {
+
+  /** Test the straightforward cases: tokens matching a rule get the configured type and flags */
+  public void testPatterns() throws Exception {
+
+    Token tokenA1 = new Token("One", 0, 2);
+    Token tokenA2 = new Token("401(k)", 4, 9);
+    Token tokenA3 = new Token("two", 11, 13);
+    Token tokenB1 = new Token("three", 15, 19);
+    Token tokenB2 = new Token("401k", 21, 24);
+
+    TokenStream ts = new CannedTokenStream(tokenA1, tokenA2, tokenA3, tokenB1, tokenB2);
+
+    // 2 ^(\d+)\(?([a-z])\)?$ ::: legal2_$1_$2
+    ts =
+        new PatternTypingFilter(
+            ts,
+            new PatternTypingRule(Pattern.compile("^(\\d+)\\(?([a-z])\\)?$"), 2, "legal2_$1_$2"));
+
+    assertTokenStreamContents(
+        ts,
+        new String[] {"One", "401(k)", "two", "three", "401k"},
+        null,
+        null,
+        new String[] {"word", "legal2_401_k", "word", "word", "legal2_401_k"},
+        null,
+        null,
+        null,
+        null,
+        null,
+        false,
+        null,
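+        // trailing int[] = expected FlagsAttribute value per token: 2 where the rule matched, else 0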
+        new int[] {0, 2, 0, 0, 2});
+  }
+
+  public void testFirstPatternWins() throws IOException {
+    Token tokenA1 = new Token("One", 0, 2);
+    Token tokenA3 = new Token("forty-two", 11, 13);
+    Token tokenB1 = new Token("4-2", 15, 19);
+
+    TokenStream ts = new CannedTokenStream(tokenA1, tokenA3, tokenB1);
+
+    // 6 ^(\d+)-(\d+)$ ::: $1_hnum_$2   and   2 ^(\w+)-(\w+)$ ::: $1_hword_$2
+    PatternTypingRule p1 =
+        new PatternTypingRule(Pattern.compile("^(\\d+)-(\\d+)$"), 6, "$1_hnum_$2");
+    PatternTypingRule p2 =
+        new PatternTypingRule(Pattern.compile("^(\\w+)-(\\w+)$"), 2, "$1_hword_$2");
+
+    ts = new PatternTypingFilter(ts, p1, p2);
+
+    assertTokenStreamContents(
+        ts,
+        new String[] {"One", "forty-two", "4-2"},
+        null,
+        null,
+        new String[] {"word", "forty_hword_two", "4_hnum_2"},
+        null,
+        null,
+        null,
+        null,
+        null,
+        false,
+        null,
+        new int[] {0, 2, 6});
+  }
+}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilterFactory.java
new file mode 100644
index 0000000..dce0b59
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTypingFilterFactory.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.analysis.pattern;
+
+import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenFilterFactory;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.StringMockResourceLoader;
+import org.apache.lucene.util.Version;
+
+/** This test just ensures the factory works */
+public class TestPatternTypingFilterFactory extends BaseTokenStreamFactoryTestCase {
+
+  public void testFactory() throws Exception {
+    Token tokenA1 = new Token("One", 0, 2);
+    Token tokenA3 = new Token("forty-two", 11, 13);
+    Token tokenB1 = new Token("4-2", 15, 19);
+
+    TokenStream ts = new CannedTokenStream(tokenA1, tokenA3, tokenB1);
+
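+    // StringMockResourceLoader returns the inline rule lines below for any requested resource
+    // name, so "patterns.txt" here is only nominal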
+    TokenFilterFactory tokenFilterFactory =
+        tokenFilterFactory(
+            "patternTyping",
+            Version.LATEST,
+            new StringMockResourceLoader(
+                "6 \\b(\\d+)-(\\d+) ::: $1_hnum_$2\n" + "2 \\b(\\w+)-(\\w+) ::: $1_hword_$2"),
+            "patternFile",
+            "patterns.txt");
+
+    ts = tokenFilterFactory.create(ts);
+    assertTokenStreamContents(
+        ts,
+        new String[] {"One", "forty-two", "4-2"},
+        null,
+        null,
+        new String[] {"word", "forty_hword_two", "4_hnum_2"},
+        null,
+        null,
+        null,
+        null,
+        null,
+        false,
+        null,
+        new int[] {0, 2, 6});
+  }
+}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilter.java
similarity index 98%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilter.java
index 861591d..6ac7219 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilter.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.util.BytesRef;
 
-public class DelimitedPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestDelimitedPayloadTokenFilter extends BaseTokenStreamTestCase {
 
   public void testPayloads() throws Exception {
     String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestNumericPayloadTokenFilter.java
similarity index 97%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestNumericPayloadTokenFilter.java
index dfd07ee..98f2f40 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestNumericPayloadTokenFilter.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
-public class NumericPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestNumericPayloadTokenFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTokenOffsetPayloadTokenFilter.java
similarity index 96%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTokenOffsetPayloadTokenFilter.java
index 32e87d7..eaef32b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTokenOffsetPayloadTokenFilter.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.util.BytesRef;
 
-public class TokenOffsetPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestTokenOffsetPayloadTokenFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTypeAsPayloadTokenFilter.java
similarity index 97%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTypeAsPayloadTokenFilter.java
index b5b02a6..2a07c0e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestTypeAsPayloadTokenFilter.java
@@ -24,7 +24,7 @@
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
-public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
+public class TestTypeAsPayloadTokenFilter extends BaseTokenStreamTestCase {
 
   public void test() throws IOException {
     String test = "The quick red fox jumped over the lazy brown dogs";
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
similarity index 98%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
index c99ff1b..0b1e29b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/TestQueryAutoStopWordAnalyzer.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.store.ByteBuffersDirectory;
 import org.apache.lucene.store.Directory;
 
-public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
+public class TestQueryAutoStopWordAnalyzer extends BaseTokenStreamTestCase {
   String variedFieldValues[] = {
     "the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"
   };
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/FixedShingleFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestFixedShingleFilter.java
similarity index 98%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/FixedShingleFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestFixedShingleFilter.java
index 2b71736..db085f2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/FixedShingleFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestFixedShingleFilter.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;
 
-public class FixedShingleFilterTest extends BaseTokenStreamTestCase {
+public class TestFixedShingleFilter extends BaseTokenStreamTestCase {
 
   public void testBiGramFilter() throws IOException {
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleAnalyzerWrapper.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleAnalyzerWrapper.java
index 07c36f4..269d48dc 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleAnalyzerWrapper.java
@@ -44,7 +44,7 @@
 import org.apache.lucene.store.Directory;
 
 /** A test class for ShingleAnalyzerWrapper as regards queries and scoring. */
-public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
+public class TestShingleAnalyzerWrapper extends BaseTokenStreamTestCase {
   private Analyzer analyzer;
   private IndexSearcher searcher;
   private IndexReader reader;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
index 976677f..5a597ef 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilter.java
@@ -30,7 +30,7 @@
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
-public class ShingleFilterTest extends BaseTokenStreamTestCase {
+public class TestShingleFilter extends BaseTokenStreamTestCase {
 
   public static final Token[] TEST_TOKEN =
       new Token[] {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
similarity index 99%
rename from lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
rename to lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
index 834504b..de32cc4 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizer.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
 
 /** Basic Tests for {@link WikipediaTokenizer} */
-public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
+public class TestWikipediaTokenizer extends BaseTokenStreamTestCase {
   protected static final String LINK_PHRASES =
       "click [[link here again]] click [http://lucene.apache.org here again] [[Category:a b c d]]";
 
diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryTest.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java
similarity index 98%
rename from lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryTest.java
rename to lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java
index 95de540..9555ba0 100644
--- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryTest.java
+++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java
@@ -35,7 +35,7 @@
 import org.apache.lucene.util.fst.IntsRefFSTEnum;
 
 /** Tests of TokenInfoDictionary build tools; run using ant test-tools */
-public class TokenInfoDictionaryTest extends LuceneTestCase {
+public class TestTokenInfoDictionary extends LuceneTestCase {
 
   public void testPut() throws Exception {
     TokenInfoDictionary dict =
diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestUserDictionary.java
similarity index 98%
rename from lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java
rename to lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestUserDictionary.java
index 28d5591..3cc5ea6 100644
--- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/UserDictionaryTest.java
+++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestUserDictionary.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class UserDictionaryTest extends LuceneTestCase {
+public class TestUserDictionary extends LuceneTestCase {
 
   @Test
   public void testLookup() throws IOException {
diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/UnknownDictionaryTest.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/TestUnknownDictionary.java
similarity index 97%
rename from lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/UnknownDictionaryTest.java
rename to lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/TestUnknownDictionary.java
index 8aa118a..110cdc2 100644
--- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/UnknownDictionaryTest.java
+++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/util/TestUnknownDictionary.java
@@ -19,7 +19,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class UnknownDictionaryTest extends LuceneTestCase {
+public class TestUnknownDictionary extends LuceneTestCase {
   public static final String FILENAME = "unk-tokeninfo-dict.obj";
 
   @Test
diff --git a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryTest.java b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestTokenInfoDictionary.java
similarity index 98%
rename from lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryTest.java
rename to lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestTokenInfoDictionary.java
index d4083d9..10042c9 100644
--- a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryTest.java
+++ b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestTokenInfoDictionary.java
@@ -34,7 +34,7 @@
 import org.apache.lucene.util.fst.IntsRefFSTEnum;
 
 /** Tests of TokenInfoDictionary build tools; run using ant test-tools */
-public class TokenInfoDictionaryTest extends LuceneTestCase {
+public class TestTokenInfoDictionary extends LuceneTestCase {
 
   public void testPut() throws Exception {
     TokenInfoDictionary dict =
diff --git a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/UserDictionaryTest.java b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestUserDictionary.java
similarity index 97%
rename from lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/UserDictionaryTest.java
rename to lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestUserDictionary.java
index 4a03b11..864d8e8 100644
--- a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/UserDictionaryTest.java
+++ b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/dict/TestUserDictionary.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class UserDictionaryTest extends LuceneTestCase {
+public class TestUserDictionary extends LuceneTestCase {
   @Test
   public void testLookup() throws IOException {
     UserDictionary dictionary = TestKoreanTokenizer.readDict();
diff --git a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/UnknownDictionaryTest.java b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/TestUnknownDictionary.java
similarity index 96%
rename from lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/UnknownDictionaryTest.java
rename to lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/TestUnknownDictionary.java
index 3de038a..10b7dee 100644
--- a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/UnknownDictionaryTest.java
+++ b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/util/TestUnknownDictionary.java
@@ -19,7 +19,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class UnknownDictionaryTest extends LuceneTestCase {
+public class TestUnknownDictionary extends LuceneTestCase {
 
   @Test
   public void testPutCharacterCategory() {
diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilter.java
similarity index 98%
rename from lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java
rename to lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilter.java
index ccd1c0c..798c700 100644
--- a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java
+++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilter.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.util.TestUtil;
 
-public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase {
+public class TestDoubleMetaphoneFilter extends BaseTokenStreamTestCase {
 
   public void testSize4FalseInject() throws Exception {
     TokenStream stream = whitespaceMockTokenizer("international");
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60FieldInfosFormat.java
similarity index 97%
rename from lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
rename to lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60FieldInfosFormat.java
index dd72965..3eb352e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/Lucene60FieldInfosFormat.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60FieldInfosFormat.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene60;
+package org.apache.lucene.backward_codecs.lucene60;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -311,6 +311,11 @@
     }
   }
 
+  /**
+   * Note: although this format is only used on older versions, we need to keep the write logic in
+   * addition to the read logic. It's possible for field infos on older segments to be written to,
+   * for example as part of in-place doc values updates.
+   */
   @Override
   public void write(
       Directory directory,
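
To make the retained-write-logic note above concrete, here is a minimal, hedged sketch of the scenario it describes: an in-place doc values update rewrites the field infos of an existing segment, so segments created by an older codec still exercise this format's write path. The index path, field names, and term below are hypothetical, not taken from this patch.

    import java.nio.file.Paths;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.FSDirectory;

    public class InPlaceDocValuesUpdateSketch {
      public static void main(String[] args) throws Exception {
        // Assumes "path/to/index" holds segments written by an older codec
        // (e.g. one using Lucene60FieldInfosFormat).
        try (FSDirectory dir = FSDirectory.open(Paths.get("path/to/index"));
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
          // Updates a numeric doc values field in place: no re-indexing, but the
          // affected segments' field infos are rewritten by the codec that
          // originally created them, hence the retained write logic above.
          writer.updateNumericDocValue(new Term("id", "42"), "popularity", 100L);
          writer.commit();
        }
      }
    }
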
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
index b1ee4b4..fae33b0 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70Codec.java
@@ -18,6 +18,7 @@
 
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
+import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60PointsFormat;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
@@ -35,7 +36,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
 
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
index cd2eb90..6660461 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80Codec.java
@@ -17,6 +17,7 @@
 package org.apache.lucene.backward_codecs.lucene80;
 
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60PointsFormat;
 import org.apache.lucene.backward_codecs.lucene70.Lucene70SegmentInfoFormat;
 import org.apache.lucene.codecs.Codec;
@@ -34,7 +35,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
index f439c36..49383e3 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene84/Lucene84Codec.java
@@ -19,6 +19,7 @@
 import java.util.Objects;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
+import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.backward_codecs.lucene60.Lucene60PointsFormat;
 import org.apache.lucene.backward_codecs.lucene70.Lucene70SegmentInfoFormat;
 import org.apache.lucene.codecs.Codec;
@@ -37,7 +38,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
index d05b632..7d51c67 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene86/Lucene86Codec.java
@@ -19,6 +19,7 @@
 
 import java.util.Objects;
 import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
@@ -35,7 +36,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
 import org.apache.lucene.codecs.lucene86.Lucene86PointsFormat;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
index b6925ff..b254fa6 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene87/Lucene87Codec.java
@@ -18,6 +18,7 @@
 package org.apache.lucene.backward_codecs.lucene87;
 
 import java.util.Objects;
+import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
@@ -34,7 +35,6 @@
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80NormsFormat;
 import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene60FieldInfoFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/TestLucene60FieldInfosFormat.java
similarity index 79%
rename from lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene60FieldInfoFormat.java
rename to lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/TestLucene60FieldInfosFormat.java
index ef39a81..0db5b32 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene60FieldInfoFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/TestLucene60FieldInfosFormat.java
@@ -14,17 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.codecs.lucene50;
+package org.apache.lucene.backward_codecs.lucene60;
 
+import org.apache.lucene.backward_codecs.lucene84.Lucene84RWCodec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.BaseFieldInfoFormatTestCase;
-import org.apache.lucene.util.TestUtil;
 
-/** Tests Lucene60FieldInfoFormat */
-public class TestLucene60FieldInfoFormat extends BaseFieldInfoFormatTestCase {
-
+public class TestLucene60FieldInfosFormat extends BaseFieldInfoFormatTestCase {
   @Override
   protected Codec getCodec() {
-    return TestUtil.getDefaultCodec();
+    return new Lucene84RWCodec();
   }
 }
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestDocMaker.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestDocMaker.java
index 502cc56..33ce32d 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestDocMaker.java
@@ -38,7 +38,7 @@
 import org.apache.lucene.util.IOUtils;
 
 /** Tests the functionality of {@link DocMaker}. */
-public class DocMakerTest extends BenchmarkTestCase {
+public class TestDocMaker extends BenchmarkTestCase {
 
   public static final class OneDocSource extends ContentSource {
 
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestEnwikiContentSource.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestEnwikiContentSource.java
index 7684bbb..c2464b5 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestEnwikiContentSource.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class EnwikiContentSourceTest extends LuceneTestCase {
+public class TestEnwikiContentSource extends LuceneTestCase {
 
   /** An EnwikiContentSource which works on a String and not files. */
   private static class StringableEnwikiSource extends EnwikiContentSource {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java
index 0365bc6..ad058f9 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestLineDocSource.java
@@ -45,7 +45,7 @@
 import org.apache.lucene.util.IOUtils;
 
 /** Tests the functionality of {@link LineDocSource}. */
-public class LineDocSourceTest extends BenchmarkTestCase {
+public class TestLineDocSource extends BenchmarkTestCase {
 
   private static final CompressorStreamFactory csFactory = new CompressorStreamFactory();
 
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestTrecContentSource.java
similarity index 99%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestTrecContentSource.java
index 0b0fbc8..d6a6a07 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TestTrecContentSource.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class TrecContentSourceTest extends LuceneTestCase {
+public class TestTrecContentSource extends LuceneTestCase {
 
   /** A TrecDocMaker which works on a String and not files. */
   private static class StringableTrecSource extends TrecContentSource {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestAddIndexesTask.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestAddIndexesTask.java
index f95b317..841047f 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestAddIndexesTask.java
@@ -33,7 +33,7 @@
 import org.junit.BeforeClass;
 
 /** Tests the functionality of {@link AddIndexesTask}. */
-public class AddIndexesTaskTest extends BenchmarkTestCase {
+public class TestAddIndexesTask extends BenchmarkTestCase {
 
   private static Path testDir, inputDir;
 
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCommitIndexTask.java
similarity index 96%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCommitIndexTask.java
index 6605edd..9f21a68 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCommitIndexTask.java
@@ -24,7 +24,7 @@
 import org.apache.lucene.util.Version;
 
 /** Tests the functionality of {@link CreateIndexTask}. */
-public class CommitIndexTaskTest extends BenchmarkTestCase {
+public class TestCommitIndexTask extends BenchmarkTestCase {
 
   private PerfRunData createPerfRunData() throws Exception {
     Properties props = new Properties();
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCreateIndexTask.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCreateIndexTask.java
index 4408fd6..b0d51f0 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestCreateIndexTask.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.util.Version;
 
 /** Tests the functionality of {@link CreateIndexTask}. */
-public class CreateIndexTaskTest extends BenchmarkTestCase {
+public class TestCreateIndexTask extends BenchmarkTestCase {
 
   private PerfRunData createPerfRunData(String infoStreamValue) throws Exception {
     Properties props = new Properties();
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/PerfTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestPerfTask.java
similarity index 97%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/PerfTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestPerfTask.java
index 11970df..b2d7411 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/PerfTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestPerfTask.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.benchmark.byTask.utils.Config;
 
 /** Tests the functionality of the abstract {@link PerfTask}. */
-public class PerfTaskTest extends BenchmarkTestCase {
+public class TestPerfTask extends BenchmarkTestCase {
 
   private static final class MyPerfTask extends PerfTask {
 
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestSearchWithSortTask.java
similarity index 95%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestSearchWithSortTask.java
index 2a55128..baab823 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestSearchWithSortTask.java
@@ -22,7 +22,7 @@
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.search.SortField;
 
-public class SearchWithSortTaskTest extends BenchmarkTestCase {
+public class TestSearchWithSortTask extends BenchmarkTestCase {
 
   public void testSetParams_docField() throws Exception {
     SearchWithSortTask task = new SearchWithSortTask(new PerfRunData(new Config(new Properties())));
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteEnwikiLineDocTask.java
similarity index 97%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteEnwikiLineDocTask.java
index 4d638ef..a570e45 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteEnwikiLineDocTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteEnwikiLineDocTask.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.document.StringField;
 
 /** Tests the functionality of {@link WriteEnwikiLineDocTask}. */
-public class WriteEnwikiLineDocTaskTest extends BenchmarkTestCase {
+public class TestWriteEnwikiLineDocTask extends BenchmarkTestCase {
 
   // class has to be public so that Class.forName.newInstance() will work
   /** Interleaves category docs with regular docs */
@@ -72,7 +72,7 @@
       throws Exception {
     try (BufferedReader br = Files.newBufferedReader(file, StandardCharsets.UTF_8)) {
       String line = br.readLine();
-      WriteLineDocTaskTest.assertHeaderLine(line);
+      TestWriteLineDocTask.assertHeaderLine(line);
       for (int i = 0; i < n; i++) {
         line = br.readLine();
         assertNotNull(line);
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteLineDocTask.java
similarity index 99%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteLineDocTask.java
index cd969ee..0877f17 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/TestWriteLineDocTask.java
@@ -36,7 +36,7 @@
 import org.apache.lucene.document.StringField;
 
 /** Tests the functionality of {@link WriteLineDocTask}. */
-public class WriteLineDocTaskTest extends BenchmarkTestCase {
+public class TestWriteLineDocTask extends BenchmarkTestCase {
 
   // class has to be public so that Class.forName.newInstance() will work
   public static final class WriteLineDocMaker extends DocMaker {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/AltPackageTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/TestAltPackageTask.java
similarity index 96%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/AltPackageTaskTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/TestAltPackageTask.java
index ca6e4f8..c6a3a85 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/AltPackageTaskTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/alt/TestAltPackageTask.java
@@ -20,7 +20,7 @@
 import org.apache.lucene.benchmark.byTask.Benchmark;
 
 /** Tests that tasks in alternate packages are found. */
-public class AltPackageTaskTest extends BenchmarkTestCase {
+public class TestAltPackageTask extends BenchmarkTestCase {
 
   /** Benchmark should fail loading the algorithm when alt is not specified */
   public void testWithoutAlt() throws Exception {
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/TestStreamUtils.java
similarity index 98%
rename from lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java
rename to lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/TestStreamUtils.java
index 8d00f28..a3953f4 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/StreamUtilsTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/utils/TestStreamUtils.java
@@ -31,7 +31,7 @@
 import org.junit.Before;
 import org.junit.Test;
 
-public class StreamUtilsTest extends BenchmarkTestCase {
+public class TestStreamUtils extends BenchmarkTestCase {
   private static final String TEXT = "Some-Text...";
   private Path testDir;
 
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestBM25NBClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestBM25NBClassifier.java
index cd0e6bd..f1220d2 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestBM25NBClassifier.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 
 /** Tests for {@link BM25NBClassifier} */
-public class BM25NBClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestBM25NBClassifier extends ClassificationTestBase<BytesRef> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestBooleanPerceptronClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestBooleanPerceptronClassifier.java
index 2745bd8..ba6a485 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestBooleanPerceptronClassifier.java
@@ -28,7 +28,7 @@
 import org.junit.Test;
 
 /** Testcase for {@link org.apache.lucene.classification.BooleanPerceptronClassifier} */
-public class BooleanPerceptronClassifierTest extends ClassificationTestBase<Boolean> {
+public class TestBooleanPerceptronClassifier extends ClassificationTestBase<Boolean> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestCachingNaiveBayesClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestCachingNaiveBayesClassifier.java
index 3b16de8..e69e10f 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestCachingNaiveBayesClassifier.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 
 /** Testcase for {@link org.apache.lucene.classification.CachingNaiveBayesClassifier} */
-public class CachingNaiveBayesClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestCachingNaiveBayesClassifier extends ClassificationTestBase<BytesRef> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestKNearestFuzzyClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestKNearestFuzzyClassifier.java
index 34a35cc..2ba1c08 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestKNearestFuzzyClassifier.java
@@ -28,7 +28,7 @@
 import org.junit.Test;
 
 /** Tests for {@link KNearestFuzzyClassifier} */
-public class KNearestFuzzyClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestKNearestFuzzyClassifier extends ClassificationTestBase<BytesRef> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestKNearestNeighborClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestKNearestNeighborClassifier.java
index 8651ecb..95e4f50 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestKNearestNeighborClassifier.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 
 /** Testcase for {@link KNearestNeighborClassifier} */
-public class KNearestNeighborClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestKNearestNeighborClassifier extends ClassificationTestBase<BytesRef> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/TestSimpleNaiveBayesClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/TestSimpleNaiveBayesClassifier.java
index 8400cfb..1853890 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/TestSimpleNaiveBayesClassifier.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 
 /** Testcase for {@link SimpleNaiveBayesClassifier} */
-public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<BytesRef> {
+public class TestSimpleNaiveBayesClassifier extends ClassificationTestBase<BytesRef> {
 
   @Test
   public void testBasicUsage() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/document/TestKNearestNeighborDocumentClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/document/TestKNearestNeighborDocumentClassifier.java
index 58a55a3..1432fb9 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/document/KNearestNeighborDocumentClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/document/TestKNearestNeighborDocumentClassifier.java
@@ -23,7 +23,7 @@
 import org.junit.Test;
 
 /** Tests for {@link org.apache.lucene.classification.KNearestNeighborClassifier} */
-public class KNearestNeighborDocumentClassifierTest
+public class TestKNearestNeighborDocumentClassifier
     extends DocumentClassificationTestBase<BytesRef> {
 
   @Test
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/document/TestSimpleNaiveBayesDocumentClassifier.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifierTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/document/TestSimpleNaiveBayesDocumentClassifier.java
index e45bdfa..8e49798 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/document/TestSimpleNaiveBayesDocumentClassifier.java
@@ -20,7 +20,7 @@
 import org.junit.Test;
 
 /** Tests for {@link org.apache.lucene.classification.SimpleNaiveBayesClassifier} */
-public class SimpleNaiveBayesDocumentClassifierTest
+public class TestSimpleNaiveBayesDocumentClassifier
     extends DocumentClassificationTestBase<BytesRef> {
 
   @Test
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/ConfusionMatrixGeneratorTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestConfusionMatrixGenerator.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/utils/ConfusionMatrixGeneratorTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/utils/TestConfusionMatrixGenerator.java
index e4b54b7..ba03ec3 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/ConfusionMatrixGeneratorTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestConfusionMatrixGenerator.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 
 /** Tests for {@link ConfusionMatrixGenerator} */
-public class ConfusionMatrixGeneratorTest extends ClassificationTestBase<Object> {
+public class TestConfusionMatrixGenerator extends ClassificationTestBase<Object> {
 
   @Test
   public void testGetConfusionMatrix() throws Exception {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDataSplitter.java
similarity index 98%
rename from lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/utils/TestDataSplitter.java
index 3b17da5..98265a3 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDataSplitter.java
@@ -39,7 +39,7 @@
 
 /** Testcase for {@link org.apache.lucene.classification.utils.DatasetSplitter} */
 @LuceneTestCase.SuppressCodecs("SimpleText")
-public class DataSplitterTest extends LuceneTestCase {
+public class TestDataSplitter extends LuceneTestCase {
 
   private LeafReader originalIndex;
   private RandomIndexWriter indexWriter;
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java
similarity index 97%
rename from lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
rename to lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java
index 4392b24..7db2393 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/TestDocToDoubleVectorUtils.java
@@ -34,7 +34,7 @@
 import org.junit.Test;
 
 /** Testcase for {@link org.apache.lucene.classification.utils.DocToDoubleVectorUtils} */
-public class DocToDoubleVectorUtilsTest extends LuceneTestCase {
+public class TestDocToDoubleVectorUtils extends LuceneTestCase {
 
   private IndexReader index;
   private Directory dir;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/package-info.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene60/package-info.java
deleted file mode 100644
index 7db4fa6..0000000
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene60/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Components from the Lucene 6.0 index format. See {@link org.apache.lucene.codecs.lucene90} for an
- * overview of the current index format.
- */
-package org.apache.lucene.codecs.lucene60;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
index 226da1f..6bc4f5d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/package-info.java
@@ -240,7 +240,7 @@
  * systems that frequently run out of file handles.</td>
  * </tr>
  * <tr>
- * <td>{@link org.apache.lucene.codecs.lucene60.Lucene60FieldInfosFormat Fields}</td>
+ * <td>{@link org.apache.lucene.codecs.lucene90.Lucene90FieldInfosFormat Fields}</td>
  * <td>.fnm</td>
  * <td>Stores information about the fields</td>
  * </tr>
diff --git a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
index d0a4dbd..bdf085d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
@@ -231,10 +231,15 @@
         optionalScorers.add(scorer.get(leadCost));
       }
 
-      if (scoreMode == ScoreMode.TOP_SCORES) {
-        return new WANDScorer(weight, optionalScorers, minShouldMatch);
-      } else if (minShouldMatch > 1) {
-        return new MinShouldMatchSumScorer(weight, optionalScorers, minShouldMatch);
+      // Technically speaking, WANDScorer should be able to handle the following 3 conditions now
+      // 1. Any ScoreMode (with scoring or not)
+      // 2. Any minCompetitiveScore ( >= 0 )
+      // 3. Any minShouldMatch ( >= 0 )
+      //
+      // However, as WANDScorer uses a more complex algorithm and data structures, we would
+      // still like to use DisjunctionSumScorer for exhaustive pure disjunctions, which may be faster.
+      if (scoreMode == ScoreMode.TOP_SCORES || minShouldMatch > 1) {
+        return new WANDScorer(weight, optionalScorers, minShouldMatch, scoreMode);
       } else {
         return new DisjunctionSumScorer(weight, optionalScorers, scoreMode);
       }
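
As a hedged illustration of the routing change above: with this patch, either top-docs collection (ScoreMode.TOP_SCORES) or minShouldMatch > 1 is enough to select WANDScorer, which replaces the removed MinShouldMatchSumScorer; only exhaustive pure disjunctions keep using DisjunctionSumScorer. The field and term names below are made up.

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(new TermQuery(new Term("body", "quick")), BooleanClause.Occur.SHOULD);
    builder.add(new TermQuery(new Term("body", "brown")), BooleanClause.Occur.SHOULD);
    builder.add(new TermQuery(new Term("body", "fox")), BooleanClause.Occur.SHOULD);
    builder.setMinimumNumberShouldMatch(2);
    BooleanQuery query = builder.build();
    // searcher.search(query, 10) collects top docs, and minShouldMatch > 1 holds,
    // so this disjunction is now scored by WANDScorer; the same query collected
    // exhaustively with minShouldMatch <= 1 would still use DisjunctionSumScorer.
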
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index 9aa25b8..d1d0af2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -61,7 +61,7 @@
     }
   }
 
-  // See MinShouldMatchSumScorer for an explanation
+  // See WANDScorer for an explanation
   private static long cost(Collection<BulkScorer> scorers, int minShouldMatch) {
     final PriorityQueue<BulkScorer> pq =
         new PriorityQueue<BulkScorer>(scorers.size() - minShouldMatch + 1) {
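
The cost estimate this comment now points to bounds a minShouldMatch disjunction: any doc matching at least minShouldMatch clauses must match at least one of the (numClauses - minShouldMatch + 1) cheapest clauses, since only minShouldMatch - 1 clauses lie outside that set. A small worked sketch with made-up costs:

    // Costs of four SHOULD clauses, sorted ascending (made-up numbers).
    long[] costs = {10, 50, 100, 1000};
    int minShouldMatch = 2;
    // Only minShouldMatch - 1 = 1 clause lies outside the 3 cheapest, so any
    // doc with >= 2 matching clauses must match one of those 3.
    long bound = 0;
    for (int i = 0; i < costs.length - minShouldMatch + 1; i++) {
      bound += costs[i];
    }
    // bound == 160, much tighter than the naive sum of 1160.
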
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
deleted file mode 100644
index 574fd1a..0000000
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.search;
-
-import static org.apache.lucene.search.DisiPriorityQueue.leftNode;
-import static org.apache.lucene.search.DisiPriorityQueue.parentNode;
-import static org.apache.lucene.search.DisiPriorityQueue.rightNode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-/**
- * A {@link Scorer} for {@link BooleanQuery} when {@link
- * BooleanQuery.Builder#setMinimumNumberShouldMatch(int) minShouldMatch} is between 2 and the total
- * number of clauses.
- *
- * <p>This implementation keeps sub scorers in 3 different places: - lead: a linked list of scorers
- * that are positioned on the desired doc ID - tail: a heap that contains at most minShouldMatch - 1
- * scorers that are behind the desired doc ID. These scorers are ordered by cost so that we can
- * advance the least costly ones first. - head: a heap that contains scorers which are beyond the
- * desired doc ID, ordered by doc ID in order to move quickly to the next candidate.
- *
- * <p>Finding the next match consists of first setting the desired doc ID to the least entry in
- * 'head' and then advance 'tail' until there is a match.
- */
-final class MinShouldMatchSumScorer extends Scorer {
-
-  final int minShouldMatch;
-
-  // list of scorers which 'lead' the iteration and are currently
-  // positioned on 'doc'
-  DisiWrapper lead;
-  int doc; // current doc ID of the leads
-  int freq; // number of scorers on the desired doc ID
-
-  // priority queue of scorers that are too advanced compared to the current
-  // doc. Ordered by doc ID.
-  final DisiPriorityQueue head;
-
-  // priority queue of scorers which are behind the current doc.
-  // Ordered by cost.
-  final DisiWrapper[] tail;
-  int tailSize;
-
-  final long cost;
-
-  MinShouldMatchSumScorer(Weight weight, Collection<Scorer> scorers, int minShouldMatch) {
-    super(weight);
-
-    if (minShouldMatch > scorers.size()) {
-      throw new IllegalArgumentException("minShouldMatch should be <= the number of scorers");
-    }
-    if (minShouldMatch < 1) {
-      throw new IllegalArgumentException("minShouldMatch should be >= 1");
-    }
-
-    this.minShouldMatch = minShouldMatch;
-    this.doc = -1;
-
-    head = new DisiPriorityQueue(scorers.size() - minShouldMatch + 1);
-    // there can be at most minShouldMatch - 1 scorers beyond the current position
-    // otherwise we might be skipping over matching documents
-    tail = new DisiWrapper[minShouldMatch - 1];
-
-    for (Scorer scorer : scorers) {
-      addLead(new DisiWrapper(scorer));
-    }
-
-    this.cost =
-        ScorerUtil.costWithMinShouldMatch(
-            scorers.stream().map(Scorer::iterator).mapToLong(DocIdSetIterator::cost),
-            scorers.size(),
-            minShouldMatch);
-  }
-
-  @Override
-  public final Collection<ChildScorable> getChildren() throws IOException {
-    List<ChildScorable> matchingChildren = new ArrayList<>();
-    updateFreq();
-    for (DisiWrapper s = lead; s != null; s = s.next) {
-      matchingChildren.add(new ChildScorable(s.scorer, "SHOULD"));
-    }
-    return matchingChildren;
-  }
-
-  @Override
-  public DocIdSetIterator iterator() {
-    return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
-  }
-
-  @Override
-  public TwoPhaseIterator twoPhaseIterator() {
-    DocIdSetIterator approximation =
-        new DocIdSetIterator() {
-
-          @Override
-          public int docID() {
-            assert doc == lead.doc;
-            return doc;
-          }
-
-          @Override
-          public int nextDoc() throws IOException {
-            // We are moving to the next doc ID, so scorers in 'lead' need to go in
-            // 'tail'. If there is not enough space in 'tail', then we take the least
-            // costly scorers and advance them.
-            for (DisiWrapper s = lead; s != null; s = s.next) {
-              final DisiWrapper evicted = insertTailWithOverFlow(s);
-              if (evicted != null) {
-                if (evicted.doc == doc) {
-                  evicted.doc = evicted.iterator.nextDoc();
-                } else {
-                  evicted.doc = evicted.iterator.advance(doc + 1);
-                }
-                head.add(evicted);
-              }
-            }
-
-            setDocAndFreq();
-            // It would be correct to return doNextCandidate() at this point but if you
-            // call nextDoc as opposed to advance, it probably means that you really
-            // need the next match. Returning 'doc' here would lead to a similar
-            // iteration over sub postings overall except that the decision making would
-            // happen at a higher level where more abstractions are involved and
-            // benchmarks suggested it causes a significant performance hit.
-            return doNext();
-          }
-
-          @Override
-          public int advance(int target) throws IOException {
-            // Same logic as in nextDoc
-            for (DisiWrapper s = lead; s != null; s = s.next) {
-              final DisiWrapper evicted = insertTailWithOverFlow(s);
-              if (evicted != null) {
-                evicted.doc = evicted.iterator.advance(target);
-                head.add(evicted);
-              }
-            }
-
-            // But this time there might also be scorers in 'head' behind the desired
-            // target so we need to do the same thing that we did on 'lead' on 'head'
-            DisiWrapper headTop = head.top();
-            while (headTop.doc < target) {
-              final DisiWrapper evicted = insertTailWithOverFlow(headTop);
-              // We know that the tail is full since it contains at most
-              // minShouldMatch - 1 entries and we just moved at least minShouldMatch
-              // entries to it, so evicted is not null
-              evicted.doc = evicted.iterator.advance(target);
-              headTop = head.updateTop(evicted);
-            }
-
-            setDocAndFreq();
-            return doNextCandidate();
-          }
-
-          @Override
-          public long cost() {
-            return cost;
-          }
-        };
-    return new TwoPhaseIterator(approximation) {
-
-      @Override
-      public boolean matches() throws IOException {
-        while (freq < minShouldMatch) {
-          assert freq > 0;
-          if (freq + tailSize >= minShouldMatch) {
-            // a match on doc is still possible, try to
-            // advance scorers from the tail
-            advanceTail();
-          } else {
-            return false;
-          }
-        }
-        return true;
-      }
-
-      @Override
-      public float matchCost() {
-        // maximum number of scorers that matches() might advance
-        return tail.length;
-      }
-    };
-  }
-
-  private void addLead(DisiWrapper lead) {
-    lead.next = this.lead;
-    this.lead = lead;
-    freq += 1;
-  }
-
-  private void pushBackLeads() throws IOException {
-    for (DisiWrapper s = lead; s != null; s = s.next) {
-      addTail(s);
-    }
-  }
-
-  private void advanceTail(DisiWrapper top) throws IOException {
-    top.doc = top.iterator.advance(doc);
-    if (top.doc == doc) {
-      addLead(top);
-    } else {
-      head.add(top);
-    }
-  }
-
-  private void advanceTail() throws IOException {
-    final DisiWrapper top = popTail();
-    advanceTail(top);
-  }
-
-  /** Reinitializes head, freq and doc from 'head' */
-  private void setDocAndFreq() {
-    assert head.size() > 0;
-
-    // The top of `head` defines the next potential match
-    // pop all documents which are on this doc
-    lead = head.pop();
-    lead.next = null;
-    freq = 1;
-    doc = lead.doc;
-    while (head.size() > 0 && head.top().doc == doc) {
-      addLead(head.pop());
-    }
-  }
-
-  /** Advance tail to the lead until there is a match. */
-  private int doNext() throws IOException {
-    while (freq < minShouldMatch) {
-      assert freq > 0;
-      if (freq + tailSize >= minShouldMatch) {
-        // a match on doc is still possible, try to
-        // advance scorers from the tail
-        advanceTail();
-      } else {
-        // no match on doc is possible anymore, move to the next potential match
-        pushBackLeads();
-        setDocAndFreq();
-      }
-    }
-
-    return doc;
-  }
-
-  /**
-   * Move iterators to the tail until the cumulated size of lead+tail is greater than or equal to
-   * minShouldMatch
-   */
-  private int doNextCandidate() throws IOException {
-    while (freq + tailSize < minShouldMatch) {
-      // no match on doc is possible, move to the next potential match
-      pushBackLeads();
-      setDocAndFreq();
-    }
-
-    return doc;
-  }
-
-  /** Advance all entries from the tail to know about all matches on the current doc. */
-  private void updateFreq() throws IOException {
-    assert freq >= minShouldMatch;
-    // we return the next doc when there are minShouldMatch matching clauses
-    // but some of the clauses in 'tail' might match as well
-    // in general we want to advance least-costly clauses first in order to
-    // skip over non-matching documents as fast as possible. However here,
-    // we are advancing everything anyway so iterating over clauses in
-    // (roughly) cost-descending order might help avoid some permutations in
-    // the head heap
-    for (int i = tailSize - 1; i >= 0; --i) {
-      advanceTail(tail[i]);
-    }
-    tailSize = 0;
-  }
-
-  @Override
-  public float score() throws IOException {
-    // we need to know about all matches
-    updateFreq();
-    double score = 0;
-    for (DisiWrapper s = lead; s != null; s = s.next) {
-      score += s.scorer.score();
-    }
-    return (float) score;
-  }
-
-  @Override
-  public float getMaxScore(int upTo) throws IOException {
-    // TODO: implement but be careful about floating-point errors.
-    return Float.POSITIVE_INFINITY;
-  }
-
-  @Override
-  public int docID() {
-    assert doc == lead.doc;
-    return doc;
-  }
-
-  /** Insert an entry in 'tail' and evict the least-costly scorer if full. */
-  private DisiWrapper insertTailWithOverFlow(DisiWrapper s) {
-    if (tailSize < tail.length) {
-      addTail(s);
-      return null;
-    } else if (tail.length >= 1) {
-      final DisiWrapper top = tail[0];
-      if (top.cost < s.cost) {
-        tail[0] = s;
-        downHeapCost(tail, tailSize);
-        return top;
-      }
-    }
-    return s;
-  }
-
-  /** Add an entry to 'tail'. Fails if over capacity. */
-  private void addTail(DisiWrapper s) {
-    tail[tailSize] = s;
-    upHeapCost(tail, tailSize);
-    tailSize += 1;
-  }
-
-  /** Pop the least-costly scorer from 'tail'. */
-  private DisiWrapper popTail() {
-    assert tailSize > 0;
-    final DisiWrapper result = tail[0];
-    tail[0] = tail[--tailSize];
-    downHeapCost(tail, tailSize);
-    return result;
-  }
-
-  /** Heap helpers */
-  private static void upHeapCost(DisiWrapper[] heap, int i) {
-    final DisiWrapper node = heap[i];
-    final long nodeCost = node.cost;
-    int j = parentNode(i);
-    while (j >= 0 && nodeCost < heap[j].cost) {
-      heap[i] = heap[j];
-      i = j;
-      j = parentNode(j);
-    }
-    heap[i] = node;
-  }
-
-  private static void downHeapCost(DisiWrapper[] heap, int size) {
-    int i = 0;
-    final DisiWrapper node = heap[0];
-    int j = leftNode(i);
-    if (j < size) {
-      int k = rightNode(j);
-      if (k < size && heap[k].cost < heap[j].cost) {
-        j = k;
-      }
-      if (heap[j].cost < node.cost) {
-        do {
-          heap[i] = heap[j];
-          i = j;
-          j = leftNode(i);
-          k = rightNode(j);
-          if (k < size && heap[k].cost < heap[j].cost) {
-            j = k;
-          }
-        } while (j < size && heap[j].cost < node.cost);
-        heap[i] = node;
-      }
-    }
-  }
-}
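
For reference, the pruning rule that both the removed scorer and its WANDScorer replacement rely on is that a doc can only still match when the clauses already positioned on it (freq) plus the clauses that could still be advanced onto it (tailSize) reach minShouldMatch. A minimal, hypothetical sketch of that check, not part of this patch:

    // Hypothetical sketch of the minShouldMatch pruning rule: a candidate doc is
    // only worth advancing tail scorers for when the clauses already on it plus
    // the clauses still behind it can reach the threshold.
    static boolean matchStillPossible(int freq, int tailSize, int minShouldMatch) {
      assert freq > 0;
      return freq + tailSize >= minShouldMatch;
    }

With minShouldMatch = 2, a doc with freq = 1 and an empty tail fails this check and is skipped outright, which is exactly the else branch in doNext() above.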
diff --git a/lucene/core/src/java/org/apache/lucene/search/WANDScorer.java b/lucene/core/src/java/org/apache/lucene/search/WANDScorer.java
index b1ed3bf..f33af6b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/WANDScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/WANDScorer.java
@@ -31,13 +31,30 @@
  * This implements the WAND (Weak AND) algorithm for dynamic pruning described in "Efficient Query
  * Evaluation using a Two-Level Retrieval Process" by Broder, Carmel, Herscovici, Soffer and Zien.
  * Enhanced with techniques described in "Faster Top-k Document Retrieval Using Block-Max Indexes"
- * by Ding and Suel. This scorer maintains a feedback loop with the collector in order to know at
- * any time the minimum score that is required in order for a hit to be competitive. Then it
- * leverages the {@link Scorer#getMaxScore(int) max score} from each scorer in order to know when it
- * may call {@link DocIdSetIterator#advance} rather than {@link DocIdSetIterator#nextDoc} to move to
- * the next competitive hit. Implementation is similar to {@link MinShouldMatchSumScorer} except
- * that instead of enforcing that {@code freq >= minShouldMatch}, we enforce that {@code ∑ max_score
- * >= minCompetitiveScore}.
+ * by Ding and Suel. For scoreMode == {@link ScoreMode#TOP_SCORES}, this scorer maintains a feedback
+ * loop with the collector in order to know at any time the minimum score that is required in order
+ * for a hit to be competitive.
+ *
+ * <p>The implementation supports both minCompetitiveScore, by enforcing that {@code ∑ max_score >=
+ * minCompetitiveScore}, and minShouldMatch, by enforcing {@code freq >= minShouldMatch}. It keeps
+ * sub scorers in 3 different places:
+ *
+ * <ul>
+ *   <li>tail: a heap that contains scorers that are behind the desired doc ID. These scorers are
+ *       ordered by cost so that we can advance the least costly ones first.
+ *   <li>lead: a linked list of scorers that are positioned on the desired doc ID.
+ *   <li>head: a heap that contains scorers which are beyond the desired doc ID, ordered by doc ID
+ *       in order to move quickly to the next candidate.
+ * </ul>
+ *
+ * <p>When scoreMode == {@link ScoreMode#TOP_SCORES}, it leverages the {@link
+ * Scorer#getMaxScore(int) max score} from each scorer in order to know when it may call {@link
+ * DocIdSetIterator#advance} rather than {@link DocIdSetIterator#nextDoc} to move to the next
+ * competitive hit. When scoreMode != {@link ScoreMode#TOP_SCORES}, block-max scoring related logic
+ * is skipped. Finding the next match consists of first setting the desired doc ID to the least
+ * entry in 'head', and then advancing 'tail' until there is a match, i.e. until the configured
+ * {@code freq >= minShouldMatch} and/or {@code ∑ max_score >= minCompetitiveScore} requirements
+ * are met.
  */
 final class WANDScorer extends Scorer {
 
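
As a rough mental model of the three buckets described in the Javadoc above, here is a hedged, self-contained sketch using plain java.util types and invented names (the real scorer uses DisiWrapper and DisiPriorityQueue instead):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    class ThreeBucketSketch {
      static class Sub {
        long cost; // cost estimate of this sub scorer's iterator
        int doc;   // doc ID this sub scorer is currently positioned on
        Sub next;  // linked-list pointer used while the entry sits in 'lead'
      }

      // 'tail': behind the candidate doc, least costly first so cheap iterators advance first
      final PriorityQueue<Sub> tail = new PriorityQueue<>(Comparator.comparingLong((Sub s) -> s.cost));
      // 'lead': entries positioned exactly on the candidate doc, chained through 'next'
      Sub lead;
      // 'head': beyond the candidate doc, least doc first to find the next candidate quickly
      final PriorityQueue<Sub> head = new PriorityQueue<>(Comparator.comparingInt((Sub s) -> s.doc));
    }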
@@ -134,7 +146,10 @@
   final int minShouldMatch;
   int freq;
 
-  WANDScorer(Weight weight, Collection<Scorer> scorers, int minShouldMatch) throws IOException {
+  final ScoreMode scoreMode;
+
+  WANDScorer(Weight weight, Collection<Scorer> scorers, int minShouldMatch, ScoreMode scoreMode)
+      throws IOException {
     super(weight);
 
     if (minShouldMatch >= scorers.size()) {
@@ -149,23 +164,32 @@
     this.doc = -1;
     this.upTo = -1; // will be computed on the first call to nextDoc/advance
 
+    this.scoreMode = scoreMode;
+
     head = new DisiPriorityQueue(scorers.size());
     // there can be at most num_scorers - 1 scorers beyond the current position
     tail = new DisiWrapper[scorers.size()];
 
-    OptionalInt scalingFactor = OptionalInt.empty();
-    for (Scorer scorer : scorers) {
-      scorer.advanceShallow(0);
-      float maxScore = scorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS);
-      if (maxScore != 0 && Float.isFinite(maxScore)) {
-        // 0 and +Infty should not impact the scale
-        scalingFactor =
-            OptionalInt.of(
-                Math.min(scalingFactor.orElse(Integer.MAX_VALUE), scalingFactor(maxScore)));
+    if (this.scoreMode == ScoreMode.TOP_SCORES) {
+      OptionalInt scalingFactor = OptionalInt.empty();
+      for (Scorer scorer : scorers) {
+        scorer.advanceShallow(0);
+        float maxScore = scorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS);
+        if (maxScore != 0 && Float.isFinite(maxScore)) {
+          // 0 and +Infty should not impact the scale
+          scalingFactor =
+              OptionalInt.of(
+                  Math.min(scalingFactor.orElse(Integer.MAX_VALUE), scalingFactor(maxScore)));
+        }
       }
+
+      // Use a scaling factor of 0 if all max scores are either 0 or +Infty
+      this.scalingFactor = scalingFactor.orElse(0);
+      this.maxScorePropagator = new MaxScoreSumPropagator(scorers);
+    } else {
+      this.scalingFactor = 0;
+      this.maxScorePropagator = null;
     }
-    // Use a scaling factor of 0 if all max scores are either 0 or +Infty
-    this.scalingFactor = scalingFactor.orElse(0);
 
     for (Scorer scorer : scorers) {
       addLead(new DisiWrapper(scorer));
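
The scaling factor computed above lets float max scores be turned into longs, so that per-clause upper bounds can be summed and compared exactly. A simplified, hypothetical sketch of the idea (the patch's actual scaleMaxScore/scaleMinScore helpers are more careful about edge cases):

    // Simplified sketch, assuming a non-negative finite input: scale the float
    // score by 2^scalingFactor and round up, so that summing the resulting longs
    // yields a conservative upper bound with no float rounding error.
    static long scaleMaxScoreSketch(float maxScore, int scalingFactor) {
      assert maxScore >= 0 && Float.isNaN(maxScore) == false;
      double scaled = Math.scalb((double) maxScore, scalingFactor);
      return (long) Math.ceil(scaled); // round up to keep the bound conservative
    }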
@@ -176,33 +200,34 @@
             scorers.stream().map(Scorer::iterator).mapToLong(DocIdSetIterator::cost),
             scorers.size(),
             minShouldMatch);
-    this.maxScorePropagator = new MaxScoreSumPropagator(scorers);
   }
 
   // returns a boolean so that it can be called from assert
   // the return value is useless: it always returns true
   private boolean ensureConsistent() {
-    long maxScoreSum = 0;
-    for (int i = 0; i < tailSize; ++i) {
-      assert tail[i].doc < doc;
-      maxScoreSum = Math.addExact(maxScoreSum, tail[i].maxScore);
-    }
-    assert maxScoreSum == tailMaxScore : maxScoreSum + " " + tailMaxScore;
+    if (scoreMode == ScoreMode.TOP_SCORES) {
+      long maxScoreSum = 0;
+      for (int i = 0; i < tailSize; ++i) {
+        assert tail[i].doc < doc;
+        maxScoreSum = Math.addExact(maxScoreSum, tail[i].maxScore);
+      }
+      assert maxScoreSum == tailMaxScore : maxScoreSum + " " + tailMaxScore;
 
-    maxScoreSum = 0;
-    for (DisiWrapper w = lead; w != null; w = w.next) {
-      assert w.doc == doc;
-      maxScoreSum = Math.addExact(maxScoreSum, w.maxScore);
+      maxScoreSum = 0;
+      for (DisiWrapper w = lead; w != null; w = w.next) {
+        assert w.doc == doc;
+        maxScoreSum = Math.addExact(maxScoreSum, w.maxScore);
+      }
+      assert maxScoreSum == leadMaxScore : maxScoreSum + " " + leadMaxScore;
+
+      assert minCompetitiveScore == 0 || tailMaxScore < minCompetitiveScore;
+      assert doc <= upTo;
     }
-    assert maxScoreSum == leadMaxScore : maxScoreSum + " " + leadMaxScore;
 
     for (DisiWrapper w : head) {
       assert w.doc > doc;
     }
 
-    assert minCompetitiveScore == 0 || tailMaxScore < minCompetitiveScore;
-    assert doc <= upTo;
-
     return true;
   }
 
@@ -210,6 +235,8 @@
   public void setMinCompetitiveScore(float minScore) throws IOException {
     // Let this disjunction know about the new min score so that it can skip
     // over clauses that produce low scores.
+    assert scoreMode == ScoreMode.TOP_SCORES
+        : "minCompetitiveScore can only be set for ScoreMode.TOP_SCORES, but got: " + scoreMode;
     assert minScore >= 0;
     long scaledMinScore = scaleMinScore(minScore, scalingFactor);
     assert scaledMinScore >= minCompetitiveScore;
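
For context, the collector side of this feedback loop looks roughly like the following hedged sketch; TopK and its methods are invented stand-ins for a real top-k priority queue, while Scorable.score() and Scorable.setMinCompetitiveScore() are actual Lucene entry points:

    import java.io.IOException;
    import org.apache.lucene.search.Scorable;

    class FeedbackSketch {
      interface TopK { // hypothetical top-k helper
        boolean insert(float score); // returns true if the queue accepted the hit
        boolean isFull();
        float kthBestScore();
      }

      // Once the queue is full, every future hit must beat the k-th best score,
      // so that bound is passed down to the scorer, which can then skip ahead.
      static void collect(Scorable scorable, TopK queue) throws IOException {
        float score = scorable.score();
        if (queue.insert(score) && queue.isFull()) {
          scorable.setMinCompetitiveScore(queue.kthBestScore());
        }
      }
    }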
@@ -421,7 +448,9 @@
       }
     }
 
-    assert upTo == DocIdSetIterator.NO_MORE_DOCS || (head.size() > 0 && head.top().doc <= upTo);
+    assert (head.size() == 0 && upTo == DocIdSetIterator.NO_MORE_DOCS)
+        || (head.size() > 0 && head.top().doc <= upTo);
+    assert upTo >= target;
   }
 
   /**
@@ -429,16 +458,18 @@
    * 'lead'.
    */
   private void moveToNextCandidate(int target) throws IOException {
-    // Update score bounds if necessary so
-    updateMaxScoresIfNecessary(target);
-    assert upTo >= target;
+    if (scoreMode == ScoreMode.TOP_SCORES) {
+      // Update score bounds if necessary
+      updateMaxScoresIfNecessary(target);
+      assert upTo >= target;
 
-    // updateMaxScores tries to move forward until a block with matches is found
-    // so if the head is empty it means there are no matches at all anymore
-    if (head.size() == 0) {
-      assert upTo == DocIdSetIterator.NO_MORE_DOCS;
-      doc = DocIdSetIterator.NO_MORE_DOCS;
-      return;
+      // updateMaxScores tries to move forward until a block with matches is found
+      // so if the head is empty it means there are no matches at all anymore
+      if (head.size() == 0) {
+        assert upTo == DocIdSetIterator.NO_MORE_DOCS;
+        doc = DocIdSetIterator.NO_MORE_DOCS;
+        return;
+      }
     }
 
     // The top of `head` defines the next potential match
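
When scoreMode is not TOP_SCORES, candidate selection reduces to plain disjunction iteration: take the least doc ID in 'head' and pull every scorer tied on it into 'lead'. A standalone, hypothetical sketch of that step (stand-in types, not the patch's DisiPriorityQueue):

    import java.util.PriorityQueue;

    class CandidateSketch {
      static final class Sub {
        final int doc;
        Sub(int doc) { this.doc = doc; }
      }

      // Pops the least doc in 'head' plus every entry tied with it; in the real
      // scorer the popped entries become the 'lead' linked list for that doc.
      // Assumes 'head' is non-empty and ordered by ascending doc ID.
      static int nextCandidate(PriorityQueue<Sub> head) {
        int doc = head.peek().doc;
        while (head.isEmpty() == false && head.peek().doc == doc) {
          head.poll(); // would be chained into 'lead' here
        }
        return doc;
      }
    }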
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index beba2ed..4710075 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -240,7 +240,7 @@
       assertEquals(
           "ConjunctionScorer\n"
               + "    MUST ConstantScoreScorer\n"
-              + "    MUST MinShouldMatchSumScorer\n"
+              + "    MUST WANDScorer\n"
               + "            SHOULD TermScorer body:crawler\n"
               + "            SHOULD TermScorer body:web\n"
               + "            SHOULD TermScorer body:nutch",
diff --git a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyTermOnShortTerms.java
similarity index 97%
rename from lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java
rename to lucene/core/src/test/org/apache/lucene/search/TestFuzzyTermOnShortTerms.java
index abef8b4..b60d70c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/FuzzyTermOnShortTermsTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFuzzyTermOnShortTerms.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
-public class FuzzyTermOnShortTermsTest extends LuceneTestCase {
+public class TestFuzzyTermOnShortTerms extends LuceneTestCase {
   private static final String FIELD = "field";
 
   @Test
diff --git a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java b/lucene/core/src/test/org/apache/lucene/search/TestTermInSetQuery.java
similarity index 99%
rename from lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
rename to lucene/core/src/test/org/apache/lucene/search/TestTermInSetQuery.java
index 0ce51ec..8ae811d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermInSetQuery.java
@@ -46,7 +46,7 @@
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.automaton.ByteRunAutomaton;
 
-public class TermInSetQueryTest extends LuceneTestCase {
+public class TestTermInSetQuery extends LuceneTestCase {
 
   public void testDuel() throws IOException {
     final int iters = atLeast(2);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
index c9381fe..5c92cfd 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
@@ -312,6 +312,57 @@
     }
   }
 
+  public void testBasicsWithDisjunctionAndMinShouldMatchAndNonScoringMode() throws Exception {
+    try (Directory dir = newDirectory()) {
+      try (IndexWriter w =
+          new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(newLogMergePolicy()))) {
+        for (String[] values :
+            Arrays.asList(
+                new String[] {"A", "B"}, // 0
+                new String[] {"A"}, // 1
+                new String[] {}, // 2
+                new String[] {"A", "B", "C"}, // 3
+                new String[] {"B"}, // 4
+                new String[] {"B", "C"} // 5
+                )) {
+          Document doc = new Document();
+          for (String value : values) {
+            doc.add(new StringField("foo", value, Store.NO));
+          }
+          w.addDocument(doc);
+        }
+
+        w.forceMerge(1);
+      }
+
+      try (IndexReader reader = DirectoryReader.open(dir)) {
+        IndexSearcher searcher = newSearcher(reader);
+
+        Query query =
+            new BooleanQuery.Builder()
+                .add(
+                    new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term("foo", "A"))), 2),
+                    Occur.SHOULD)
+                .add(new ConstantScoreQuery(new TermQuery(new Term("foo", "B"))), Occur.SHOULD)
+                .add(
+                    new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term("foo", "C"))), 3),
+                    Occur.SHOULD)
+                .setMinimumNumberShouldMatch(2)
+                .build();
+
+        Scorer scorer =
+            searcher
+                .createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1)
+                .scorer(searcher.getIndexReader().leaves().get(0));
+
+        assertEquals(0, scorer.iterator().nextDoc());
+        assertEquals(3, scorer.iterator().nextDoc());
+        assertEquals(5, scorer.iterator().nextDoc());
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
+      }
+    }
+  }
+
   public void testBasicsWithFilteredDisjunctionAndMinShouldMatch() throws Exception {
     try (Directory dir = newDirectory()) {
       try (IndexWriter w =
@@ -387,6 +438,66 @@
     }
   }
 
+  public void testBasicsWithFilteredDisjunctionAndMinShouldMatchAndNonScoringMode()
+      throws Exception {
+    try (Directory dir = newDirectory()) {
+      try (IndexWriter w =
+          new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(newLogMergePolicy()))) {
+        for (String[] values :
+            Arrays.asList(
+                new String[] {"A", "B"}, // 0
+                new String[] {"A", "C", "D"}, // 1
+                new String[] {}, // 2
+                new String[] {"A", "B", "C", "D"}, // 3
+                new String[] {"B"}, // 4
+                new String[] {"C", "D"} // 5
+                )) {
+          Document doc = new Document();
+          for (String value : values) {
+            doc.add(new StringField("foo", value, Store.NO));
+          }
+          w.addDocument(doc);
+        }
+
+        w.forceMerge(1);
+      }
+
+      try (IndexReader reader = DirectoryReader.open(dir)) {
+        IndexSearcher searcher = newSearcher(reader);
+
+        Query query =
+            new BooleanQuery.Builder()
+                .add(
+                    new BooleanQuery.Builder()
+                        .add(
+                            new BoostQuery(
+                                new ConstantScoreQuery(new TermQuery(new Term("foo", "A"))), 2),
+                            Occur.SHOULD)
+                        .add(
+                            new ConstantScoreQuery(new TermQuery(new Term("foo", "B"))),
+                            Occur.SHOULD)
+                        .add(
+                            new BoostQuery(
+                                new ConstantScoreQuery(new TermQuery(new Term("foo", "D"))), 4),
+                            Occur.SHOULD)
+                        .setMinimumNumberShouldMatch(2)
+                        .build(),
+                    Occur.MUST)
+                .add(new TermQuery(new Term("foo", "C")), Occur.FILTER)
+                .build();
+
+        Scorer scorer =
+            searcher
+                .createWeight(searcher.rewrite(query), ScoreMode.TOP_DOCS, 1)
+                .scorer(searcher.getIndexReader().leaves().get(0));
+
+        assertEquals(1, scorer.iterator().nextDoc());
+        assertEquals(3, scorer.iterator().nextDoc());
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
+      }
+    }
+  }
+
   public void testBasicsWithFilteredDisjunctionAndMustNotAndMinShouldMatch() throws Exception {
     try (Directory dir = newDirectory()) {
       try (IndexWriter w =
@@ -454,6 +565,58 @@
     }
   }
 
+  public void testBasicsWithFilteredDisjunctionAndMustNotAndMinShouldMatchAndNonScoringMode()
+      throws Exception {
+    try (Directory dir = newDirectory()) {
+      try (IndexWriter w =
+          new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(newLogMergePolicy()))) {
+        for (String[] values :
+            Arrays.asList(
+                new String[] {"A", "B"}, // 0
+                new String[] {"A", "C", "D"}, // 1
+                new String[] {}, // 2
+                new String[] {"A", "B", "C", "D"}, // 3
+                new String[] {"B", "D"}, // 4
+                new String[] {"C", "D"} // 5
+                )) {
+          Document doc = new Document();
+          for (String value : values) {
+            doc.add(new StringField("foo", value, Store.NO));
+          }
+          w.addDocument(doc);
+        }
+
+        w.forceMerge(1);
+      }
+
+      try (IndexReader reader = DirectoryReader.open(dir)) {
+        IndexSearcher searcher = newSearcher(reader);
+
+        Query query =
+            new BooleanQuery.Builder()
+                .add(
+                    new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term("foo", "A"))), 2),
+                    Occur.SHOULD)
+                .add(new ConstantScoreQuery(new TermQuery(new Term("foo", "B"))), Occur.SHOULD)
+                .add(new TermQuery(new Term("foo", "C")), Occur.MUST_NOT)
+                .add(
+                    new BoostQuery(new ConstantScoreQuery(new TermQuery(new Term("foo", "D"))), 4),
+                    Occur.SHOULD)
+                .setMinimumNumberShouldMatch(2)
+                .build();
+
+        Scorer scorer =
+            searcher
+                .createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1)
+                .scorer(searcher.getIndexReader().leaves().get(0));
+
+        assertEquals(0, scorer.iterator().nextDoc());
+        assertEquals(4, scorer.iterator().nextDoc());
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
+      }
+    }
+  }
+
   public void testRandom() throws IOException {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/FiniteStringsIteratorTest.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestFiniteStringsIterator.java
similarity index 98%
rename from lucene/core/src/test/org/apache/lucene/util/automaton/FiniteStringsIteratorTest.java
rename to lucene/core/src/test/org/apache/lucene/util/automaton/TestFiniteStringsIterator.java
index 1dca315..a86932f 100644
--- a/lucene/core/src/test/org/apache/lucene/util/automaton/FiniteStringsIteratorTest.java
+++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestFiniteStringsIterator.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.util.fst.Util;
 
 /** Test for {@link FiniteStringsIterator}. */
-public class FiniteStringsIteratorTest extends LuceneTestCase {
+public class TestFiniteStringsIterator extends LuceneTestCase {
   public void testRandomFiniteStrings1() {
     int numStrings = atLeast(100);
     if (VERBOSE) {
diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/LimitedFiniteStringsIteratorTest.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLimitedFiniteStringsIterator.java
similarity index 95%
rename from lucene/core/src/test/org/apache/lucene/util/automaton/LimitedFiniteStringsIteratorTest.java
rename to lucene/core/src/test/org/apache/lucene/util/automaton/TestLimitedFiniteStringsIterator.java
index ea295cf..52145e7 100644
--- a/lucene/core/src/test/org/apache/lucene/util/automaton/LimitedFiniteStringsIteratorTest.java
+++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLimitedFiniteStringsIterator.java
@@ -16,7 +16,7 @@
  */
 package org.apache.lucene.util.automaton;
 
-import static org.apache.lucene.util.automaton.FiniteStringsIteratorTest.getFiniteStrings;
+import static org.apache.lucene.util.automaton.TestFiniteStringsIterator.getFiniteStrings;
 
 import java.util.List;
 import org.apache.lucene.util.IntsRef;
@@ -26,7 +26,7 @@
 import org.apache.lucene.util.fst.Util;
 
 /** Test for {@link FiniteStringsIterator}. */
-public class LimitedFiniteStringsIteratorTest extends LuceneTestCase {
+public class TestLimitedFiniteStringsIterator extends LuceneTestCase {
   public void testRandomFiniteStrings() {
     // Just makes sure we can run on any random finite
     // automaton:
diff --git a/lucene/facet/build.gradle b/lucene/facet/build.gradle
index e94d8b0..b4f3ac5 100644
--- a/lucene/facet/build.gradle
+++ b/lucene/facet/build.gradle
@@ -28,5 +28,5 @@
   testImplementation project(':lucene:test-framework')
   testImplementation project(':lucene:queries')
   // Required for opening older indexes for backward compatibility tests
-  testCompile group: 'org.apache.lucene', name: 'lucene-codecs', version: '8.6.3'
+  testImplementation project(':lucene:backward-codecs')
 }
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
index a6c6b58..9f0c272 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/SimpleIniFileTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java
@@ -28,7 +28,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class SimpleIniFileTest extends LuceneTestCase {
+public class TestSimpleIniFile extends LuceneTestCase {
 
   @Test
   public void testStore() throws IOException {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/TestAnalysisImpl.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/analysis/TestAnalysisImpl.java
index 258c577..3ee9d3a 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/AnalysisImplTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/analysis/TestAnalysisImpl.java
@@ -32,7 +32,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class AnalysisImplTest extends LuceneTestCase {
+public class TestAnalysisImpl extends LuceneTestCase {
 
   @Test
   public void testGetPresetAnalyzerTypes() throws Exception {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/commits/TestCommitsImpl.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/commits/TestCommitsImpl.java
index 93a60a7..261347d 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/commits/CommitsImplTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/commits/TestCommitsImpl.java
@@ -37,7 +37,7 @@
 import org.junit.Before;
 import org.junit.Test;
 
-public class CommitsImplTest extends LuceneTestCase {
+public class TestCommitsImpl extends LuceneTestCase {
 
   private DirectoryReader reader;
 
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocValuesAdapter.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocValuesAdapter.java
index 572806b..75ce994 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocValuesAdapterTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocValuesAdapter.java
@@ -33,7 +33,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.junit.Test;
 
-public class DocValuesAdapterTest extends DocumentsTestBase {
+public class TestDocValuesAdapter extends DocumentsTestBase {
 
   @Override
   protected void createIndex() throws IOException {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
index 7306026..cf08f1f 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/DocumentsImplTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
@@ -34,7 +34,7 @@
   "FastCompressingStoredFieldsData",
   "FastDecompressionCompressingStoredFieldsData"
 })
-public class DocumentsImplTest extends DocumentsTestBase {
+public class TestDocumentsImpl extends DocumentsTestBase {
 
   @Test
   public void testGetMaxDoc() {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestTermVectorsAdapter.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestTermVectorsAdapter.java
index ec7884d..40266b2 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TermVectorsAdapterTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestTermVectorsAdapter.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.store.Directory;
 import org.junit.Test;
 
-public class TermVectorsAdapterTest extends DocumentsTestBase {
+public class TestTermVectorsAdapter extends DocumentsTestBase {
 
   @Override
   protected void createIndex() throws IOException {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestOverviewImpl.java
similarity index 98%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestOverviewImpl.java
index 25f543c..14810de 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/OverviewImplTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestOverviewImpl.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.store.AlreadyClosedException;
 import org.junit.Test;
 
-public class OverviewImplTest extends OverviewTestBase {
+public class TestOverviewImpl extends OverviewTestBase {
 
   @Test
   public void testGetIndexPath() {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTermCounts.java
similarity index 97%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTermCounts.java
index 16345f0..cb7a034 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TermCountsTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTermCounts.java
@@ -22,7 +22,7 @@
 import java.util.Map;
 import org.junit.Test;
 
-public class TermCountsTest extends OverviewTestBase {
+public class TestTermCounts extends OverviewTestBase {
 
   @Test
   public void testNumTerms() throws Exception {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTopTerms.java
similarity index 95%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTopTerms.java
index 80e6358..dab4a6a 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TopTermsTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/overview/TestTopTerms.java
@@ -20,7 +20,7 @@
 import java.util.List;
 import org.junit.Test;
 
-public class TopTermsTest extends OverviewTestBase {
+public class TestTopTerms extends OverviewTestBase {
 
   @Test
   public void testGetTopTerms() throws Exception {
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java b/lucene/luke/src/test/org/apache/lucene/luke/models/search/TestSearchImpl.java
similarity index 99%
rename from lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java
rename to lucene/luke/src/test/org/apache/lucene/luke/models/search/TestSearchImpl.java
index 41679d7..76c6fde 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/search/SearchImplTest.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/search/TestSearchImpl.java
@@ -51,7 +51,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class SearchImplTest extends LuceneTestCase {
+public class TestSearchImpl extends LuceneTestCase {
 
   private IndexReader reader;
   private Directory dir;
diff --git a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/lucene/misc/src/test/org/apache/lucene/misc/TestSweetSpotSimilarity.java
similarity index 98%
rename from lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
rename to lucene/misc/src/test/org/apache/lucene/misc/TestSweetSpotSimilarity.java
index 46f7da6..9ca44d1 100644
--- a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
+++ b/lucene/misc/src/test/org/apache/lucene/misc/TestSweetSpotSimilarity.java
@@ -36,7 +36,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 
 /** Test of the SweetSpotSimilarity */
-public class SweetSpotSimilarityTest extends LuceneTestCase {
+public class TestSweetSpotSimilarity extends LuceneTestCase {
 
   private static float computeNorm(Similarity sim, String field, int length) throws IOException {
     String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" "));
diff --git a/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java b/lucene/misc/src/test/org/apache/lucene/misc/store/TestWindowsDirectory.java
similarity index 95%
rename from lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java
rename to lucene/misc/src/test/org/apache/lucene/misc/store/TestWindowsDirectory.java
index 8994e69..9ccc96a 100644
--- a/lucene/misc/src/test/org/apache/lucene/misc/store/WindowsDirectoryTest.java
+++ b/lucene/misc/src/test/org/apache/lucene/misc/store/TestWindowsDirectory.java
@@ -26,7 +26,7 @@
 import org.junit.Rule;
 import org.junit.rules.TestRule;
 
-public class WindowsDirectoryTest extends LuceneTestCase {
+public class TestWindowsDirectory extends LuceneTestCase {
   @Rule
   public static TestRule requiresNative =
       new NativeLibEnableRule(EnumSet.of(NativeLibEnableRule.OperatingSystem.WINDOWS));
diff --git a/lucene/queryparser/docs/surround/README.txt b/lucene/queryparser/docs/surround/README.txt
index 056f080..f922a36 100644
--- a/lucene/queryparser/docs/surround/README.txt
+++ b/lucene/queryparser/docs/surround/README.txt
@@ -70,5 +70,5 @@
 Warnings about missing terms are sent to System.out; this might
 be replaced by another stream, and tested for in the tests.
 
-BooleanQueryTst.TestCollector uses a results checking method that should
+TestBooleanQuery.TestCollector uses a results checking method that should
 be replaced by the checking method from Lucene's TestBasics.java.
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
index f7cda89..0b968b8 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test01Exceptions.java
@@ -57,7 +57,7 @@
   };
 
   public void test01Exceptions() throws Exception {
-    String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose);
+    String m = TestExceptionQuery.getFailQueries(exceptionQueries, verbose);
     if (m.length() > 0) {
       fail("No ParseException for:\n" + m);
     }
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
index fcb731e..7d55b79 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java
@@ -46,11 +46,11 @@
   SingleFieldTestDb db1;
 
   public void normalTest1(String query, int[] expdnrs) throws Exception {
-    BooleanQueryTst bqt =
-        new BooleanQueryTst(
+    TestBooleanQuery tbq =
+        new TestBooleanQuery(
             query, expdnrs, db1, fieldName, this, new BasicQueryFactory(maxBasicQueries));
-    bqt.setVerbose(verbose);
-    bqt.doTest();
+    tbq.setVerbose(verbose);
+    tbq.doTest();
   }
 
   public void test02Terms01() throws Exception {
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
index 9edbc7e..36554ec 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java
@@ -41,7 +41,7 @@
   };
 
   public void test00Exceptions() throws Exception {
-    String m = ExceptionQueryTst.getFailQueries(exceptionQueries, verbose);
+    String m = TestExceptionQuery.getFailQueries(exceptionQueries, verbose);
     if (m.length() > 0) {
       fail("No ParseException for:\n" + m);
     }
@@ -68,11 +68,11 @@
   }
 
   private void distanceTst(String query, int[] expdnrs, SingleFieldTestDb db) throws Exception {
-    BooleanQueryTst bqt =
-        new BooleanQueryTst(
+    TestBooleanQuery tbq =
+        new TestBooleanQuery(
             query, expdnrs, db, fieldName, this, new BasicQueryFactory(maxBasicQueries));
-    bqt.setVerbose(verbose);
-    bqt.doTest();
+    tbq.setVerbose(verbose);
+    tbq.doTest();
   }
 
   public void distanceTest1(String query, int[] expdnrs) throws Exception {
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestBooleanQuery.java
similarity index 98%
rename from lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java
rename to lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestBooleanQuery.java
index 6690f5f..25c848e 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/BooleanQueryTst.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestBooleanQuery.java
@@ -28,7 +28,7 @@
 import org.apache.lucene.search.SimpleCollector;
 import org.junit.Assert;
 
-public class BooleanQueryTst {
+public class TestBooleanQuery {
   String queryText;
   final int[] expectedDocNrs;
   SingleFieldTestDb dBase;
@@ -37,7 +37,7 @@
   BasicQueryFactory qf;
   boolean verbose = true;
 
-  public BooleanQueryTst(
+  public TestBooleanQuery(
       String queryText,
       int[] expectedDocNrs,
       SingleFieldTestDb dBase,
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestExceptionQuery.java
similarity index 91%
rename from lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java
rename to lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestExceptionQuery.java
index dc35b09..18574c3 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/ExceptionQueryTst.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestExceptionQuery.java
@@ -19,11 +19,11 @@
 import org.apache.lucene.queryparser.surround.parser.ParseException;
 import org.apache.lucene.queryparser.surround.parser.QueryParser;
 
-public class ExceptionQueryTst {
+public class TestExceptionQuery {
   private String queryText;
   private boolean verbose;
 
-  public ExceptionQueryTst(String queryText, boolean verbose) {
+  public TestExceptionQuery(String queryText, boolean verbose) {
     this.queryText = queryText;
     this.verbose = verbose;
   }
@@ -53,7 +53,7 @@
   public static String getFailQueries(String[] exceptionQueries, boolean verbose) {
     StringBuilder failQueries = new StringBuilder();
     for (int i = 0; i < exceptionQueries.length; i++) {
-      new ExceptionQueryTst(exceptionQueries[i], verbose).doTest(failQueries);
+      new TestExceptionQuery(exceptionQueries[i], verbose).doTest(failQueries);
     }
     return failQueries.toString();
   }
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestSrndQuery.java
similarity index 96%
rename from lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java
rename to lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestSrndQuery.java
index e0a24fa..425d902 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/surround/query/TestSrndQuery.java
@@ -23,7 +23,7 @@
 import org.junit.Test;
 
 /** */
-public class SrndQueryTest extends LuceneTestCase {
+public class TestSrndQuery extends LuceneTestCase {
 
   void checkEqualParsings(String s1, String s2) throws Exception {
     String fieldName = "foo";
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentDictionary.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentDictionary.java
index 715278a..12dec50 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentDictionary.java
@@ -48,7 +48,7 @@
 
 // See: https://issues.apache.org/jira/browse/SOLR-12028 Tests cannot remove files on Windows
 // machines occasionally
-public class DocumentDictionaryTest extends LuceneTestCase {
+public class TestDocumentDictionary extends LuceneTestCase {
 
   static final String FIELD_NAME = "f1";
   static final String WEIGHT_FIELD_NAME = "w1";
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentValueSourceDictionary.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentValueSourceDictionary.java
index cb1c992..4706043 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentValueSourceDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestDocumentValueSourceDictionary.java
@@ -49,7 +49,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
 
-public class DocumentValueSourceDictionaryTest extends LuceneTestCase {
+public class TestDocumentValueSourceDictionary extends LuceneTestCase {
 
   static final String FIELD_NAME = "f1";
   static final String WEIGHT_FIELD_NAME_1 = "w1";
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestFileDictionary.java
similarity index 98%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/TestFileDictionary.java
index c1bf883..3fd5ce7 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestFileDictionary.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
-public class FileDictionaryTest extends LuceneTestCase {
+public class TestFileDictionary extends LuceneTestCase {
 
   private Map.Entry<List<String>, String> generateFileEntry(
       String fieldDelimiter, boolean hasWeight, boolean hasPayload) {
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestLookupBenchmark.java
similarity index 97%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/TestLookupBenchmark.java
index c6b407b..f7386bc 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestLookupBenchmark.java
@@ -49,7 +49,7 @@
 
 /** Benchmarks tests for implementations of {@link Lookup} interface. */
 @Ignore("COMMENT ME TO RUN BENCHMARKS!")
-public class LookupBenchmarkTest extends LuceneTestCase {
+public class TestLookupBenchmark extends LuceneTestCase {
   @SuppressWarnings({"unchecked", "deprecation"})
   private final List<Class<? extends Lookup>> benchmarkClasses =
       Arrays.asList(
@@ -83,9 +83,9 @@
     assert false : "disable assertions before running benchmarks!";
     List<Input> input = readTop50KWiki();
     Collections.shuffle(input, random);
-    LookupBenchmarkTest.dictionaryInput = input.toArray(new Input[input.size()]);
+    TestLookupBenchmark.dictionaryInput = input.toArray(new Input[input.size()]);
     Collections.shuffle(input, random);
-    LookupBenchmarkTest.benchmarkInput = input;
+    TestLookupBenchmark.benchmarkInput = input;
   }
 
   static final Charset UTF_8 = StandardCharsets.UTF_8;
@@ -93,7 +93,7 @@
   /** Collect the multilingual input for benchmarks/ tests. */
   public static List<Input> readTop50KWiki() throws Exception {
     List<Input> input = new ArrayList<>();
-    URL resource = LookupBenchmarkTest.class.getResource("Top50KWiki.utf8");
+    URL resource = TestLookupBenchmark.class.getResource("Top50KWiki.utf8");
     assert resource != null : "Resource missing: Top50KWiki.utf8";
 
     String line = null;
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestPersistence.java
similarity index 98%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/TestPersistence.java
index 731e69d..e208a92 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/PersistenceTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/TestPersistence.java
@@ -28,7 +28,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class PersistenceTest extends LuceneTestCase {
+public class TestPersistence extends LuceneTestCase {
   public final String[] keys =
       new String[] {
         "one",
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
index 869e882..3343a6d 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingInfixSuggester.java
@@ -49,7 +49,7 @@
 import org.apache.lucene.util.TestUtil;
 import org.junit.Test;
 
-public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
+public class TestAnalyzingInfixSuggester extends LuceneTestCase {
 
   public void testBasic() throws Exception {
     Input keys[] =
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
index bbdd76a..c0c96c3 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestAnalyzingSuggester.java
@@ -57,7 +57,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class AnalyzingSuggesterTest extends LuceneTestCase {
+public class TestAnalyzingSuggester extends LuceneTestCase {
 
   /** this is basically the WFST test ported to KeywordAnalyzer. so it acts the same */
   public void testKeyword() throws Exception {
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
index 8717dbe..95114ed 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestBlendedInfixSuggester.java
@@ -35,7 +35,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class BlendedInfixSuggesterTest extends LuceneTestCase {
+public class TestBlendedInfixSuggester extends LuceneTestCase {
 
   /** Test the weight transformation depending on the position of the matching term. */
   public void testBlendedSort() throws IOException {
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
index e6f64de..7096cef 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java
@@ -51,7 +51,7 @@
 
   public void testBasic() throws Exception {
     Iterable<Input> keys =
-        AnalyzingSuggesterTest.shuffle(
+        TestAnalyzingSuggester.shuffle(
             new Input("foo bar baz blah", 50), new Input("boo foo bar foo bee", 20));
 
     Analyzer a = new MockAnalyzer(random());
@@ -96,7 +96,7 @@
   public void testIllegalByteDuringBuild() throws Exception {
     // Default separator is INFORMATION SEPARATOR TWO
     // (0x1e), so no input token is allowed to contain it
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo\u001ebar baz", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo\u001ebar baz", 50));
     Analyzer analyzer = new MockAnalyzer(random());
     FreeTextSuggester sug = new FreeTextSuggester(analyzer);
     expectThrows(
@@ -111,7 +111,7 @@
   public void testIllegalByteDuringQuery() throws Exception {
     // Default separator is INFORMATION SEPARATOR TWO
     // (0x1e), so no input token is allowed to contain it
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar baz", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar baz", 50));
     Analyzer analyzer = new MockAnalyzer(random());
     FreeTextSuggester sug = new FreeTextSuggester(analyzer);
     sug.build(new InputArrayIterator(keys));
@@ -196,7 +196,7 @@
   // Make sure you can suggest based only on unigram model:
   public void testUnigrams() throws Exception {
     Iterable<Input> keys =
-        AnalyzingSuggesterTest.shuffle(new Input("foo bar baz blah boo foo bar foo bee", 50));
+        TestAnalyzingSuggester.shuffle(new Input("foo bar baz blah boo foo bar foo bee", 50));
 
     Analyzer a = new MockAnalyzer(random());
     FreeTextSuggester sug = new FreeTextSuggester(a, a, 1, (byte) 0x20);
@@ -208,7 +208,7 @@
 
   // Make sure the last token is not duplicated
   public void testNoDupsAcrossGrams() throws Exception {
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar bar bar bar", 50));
     Analyzer a = new MockAnalyzer(random());
     FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20);
     sug.build(new InputArrayIterator(keys));
@@ -218,7 +218,7 @@
 
   // Lookup of just empty string produces unicode only matches:
   public void testEmptyString() throws Exception {
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("foo bar bar bar bar", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("foo bar bar bar bar", 50));
     Analyzer a = new MockAnalyzer(random());
     FreeTextSuggester sug = new FreeTextSuggester(a, a, 2, (byte) 0x20);
     sug.build(new InputArrayIterator(keys));
@@ -245,7 +245,7 @@
           }
         };
 
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of oz", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("wizard of oz", 50));
     FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20);
     sug.build(new InputArrayIterator(keys));
     assertEquals("wizard _ oz/1.00", toString(sug.lookup("wizard of", 10)));
@@ -271,7 +271,7 @@
           }
         };
 
-    Iterable<Input> keys = AnalyzingSuggesterTest.shuffle(new Input("wizard of of oz", 50));
+    Iterable<Input> keys = TestAnalyzingSuggester.shuffle(new Input("wizard of of oz", 50));
     FreeTextSuggester sug = new FreeTextSuggester(a, a, 3, (byte) 0x20);
     sug.build(new InputArrayIterator(keys));
     assertEquals("", toString(sug.lookup("wizard of of", 10)));
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
similarity index 99%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
index ea63a1d..472a287 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFuzzySuggester.java
@@ -50,7 +50,7 @@
 import org.apache.lucene.util.automaton.FiniteStringsIterator;
 import org.apache.lucene.util.fst.Util;
 
-public class FuzzySuggesterTest extends LuceneTestCase {
+public class TestFuzzySuggester extends LuceneTestCase {
 
   public void testRandomEdits() throws IOException {
     List<Input> keys = new ArrayList<>();
@@ -284,7 +284,7 @@
   public void testGraphDups() throws Exception {
 
     final Analyzer analyzer =
-        new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+        new TestAnalyzingSuggester.MultiCannedAnalyzer(
             new CannedTokenStream(
                 token("wifi", 1, 1),
                 token("hotspot", 0, 2),
@@ -343,7 +343,7 @@
     //  synonym module
 
     final Analyzer analyzer =
-        new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+        new TestAnalyzingSuggester.MultiCannedAnalyzer(
             new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("xc", 1, 1)),
             new CannedTokenStream(token("ba", 1, 1), token("xd", 1, 1)),
             new CannedTokenStream(token("ab", 1, 1), token("ba", 0, 1), token("x", 1, 1)));
@@ -387,7 +387,7 @@
 
   private Analyzer getUnusualAnalyzer() {
     // First three calls just returns "a", then returns ["a","b"], then "a" again
-    return new AnalyzingSuggesterTest.MultiCannedAnalyzer(
+    return new TestAnalyzingSuggester.MultiCannedAnalyzer(
         new CannedTokenStream(token("a", 1, 1)),
         new CannedTokenStream(token("a", 1, 1)),
         new CannedTokenStream(token("a", 1, 1)),
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestBytesRefSorters.java
similarity index 97%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestBytesRefSorters.java
index c1bde03..2af2eb0 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestBytesRefSorters.java
@@ -26,7 +26,7 @@
 import org.apache.lucene.util.OfflineSorter;
 import org.junit.Test;
 
-public class BytesRefSortersTest extends LuceneTestCase {
+public class TestBytesRefSorters extends LuceneTestCase {
   @Test
   public void testExternalRefSorter() throws Exception {
     Directory tempDir = newDirectory();
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
similarity index 98%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
index 73e6f8d..29b3883 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.util.*;
 
 /** Unit tests for {@link FSTCompletion}. */
-public class FSTCompletionTest extends LuceneTestCase {
+public class TestFSTCompletion extends LuceneTestCase {
 
   public static Input tf(String t, int v) {
     return new Input(t, v);
@@ -177,7 +177,7 @@
 
   @Slow
   public void testMultilingualInput() throws Exception {
-    List<Input> input = LookupBenchmarkTest.readTop50KWiki();
+    List<Input> input = TestLookupBenchmark.readTop50KWiki();
 
     Directory tempDir = getDirectory();
     FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "fst");
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
similarity index 98%
rename from lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
rename to lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
index 68a704b..2ed0f80 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestWFSTCompletion.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
-public class WFSTCompletionTest extends LuceneTestCase {
+public class TestWFSTCompletion extends LuceneTestCase {
 
   public void testBasic() throws Exception {
     Input keys[] =
diff --git a/settings.gradle b/settings.gradle
index 494c710..3b6947e 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -65,7 +65,6 @@
 include "solr:contrib:langid"
 include "solr:contrib:jaegertracer-configurator"
 include "solr:contrib:prometheus-exporter"
-include "solr:contrib:scripting"
 include "solr:contrib:ltr"
 include "solr:webapp"
 include "solr:test-framework"
@@ -75,4 +74,3 @@
 include "solr:documentation"
 include "solr:packaging"
 include "solr:docker"
-include "solr:docker:package"
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7a17a75..7edd319 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -25,6 +25,9 @@
 
 * SOLR-15019: Replica placement API needs a way to fetch existing replica metrics. (ab, ilan)
 
+* SOLR-15055: Re-implement 'withCollection'. This also adds placement plugin support
+  for rejecting replica / collection deletions that would violate placement constraints. (ab, ilan)
+
 Improvements
 ----------------------
 * LUCENE-8984: MoreLikeThis MLT is biased for uncommon fields (Andy Hind via Anshum Gupta)
@@ -188,9 +191,6 @@
 
 * SOLR-14297: Replace commons-codec Base64 with JDK8 Base64 (Andras Salamon via Houston Putman)
 
-* SOLR-14067: StatelessScriptUpdateProcessorFactory moved to it's own /contrib/scripting/ package instead
- of shipping as part of Solr due to security concerns.  Renamed to ScriptUpdateProcessorFactory for simpler name. (Eric Pugh)
- 
 Bug Fixes
 ---------------------
 * SOLR-14546: Fix for a relatively hard to hit issue in OverseerTaskProcessor that could lead to out of order execution
@@ -215,7 +215,7 @@
 
 Bug Fixes
 ---------------------
-(No changes)
+* SOLR-15078: Fix ExpandComponent behavior when expanding on numeric fields to differentiate '0' group from null group (hossman)
 
 Other Changes
 ---------------------
@@ -364,6 +364,11 @@
 
 * SOLR-15070: Suggester requests made with SolrJ can now use XMLResponseParser (Jason Gerlowski)
 
+* SOLR-15073: Fix ClassCastException in SystemInfoHandler.getSecurityInfo. (Nikolay Ivanov, Christine Poerschke)
+
+* SOLR-15071: Fix ArrayIndexOutOfBoundsException in contrib/ltr SolrFeatureScorer.
+  (Florin Babes, Ovidiu Mihalcea, David Smiley, Christine Poerschke)
+
 Other Changes
 ---------------------
 
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/Feature.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/Feature.java
index bc7ff87..52a0cef 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/Feature.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/Feature.java
@@ -27,7 +27,6 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryVisitor;
 import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.TwoPhaseIterator;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.RamUsageEstimator;
@@ -365,10 +364,9 @@
         return in.iterator();
       }
 
-      @Override
-      public TwoPhaseIterator twoPhaseIterator() {
-        return in.twoPhaseIterator();
-      }
+      // Currently (Q1 2021) we intentionally don't delegate twoPhaseIterator()
+      // because it doesn't always work and we don't yet know why; please see
+      // SOLR-15071 for more details.
 
       @Override
       public int advanceShallow(int target) throws IOException {
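
Context for the change above: Scorer.twoPhaseIterator() has a default implementation that returns null, which signals "no two-phase support" to callers, so they fall back to plain iterator(). By dropping the delegation, FilterFeatureScorer stops advertising the wrapped scorer's two-phase iterator and consumers use exact iteration instead. A minimal sketch of that caller-side contract, assuming only the Lucene Scorer/TwoPhaseIterator API (hypothetical class name, not part of this patch):

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.TwoPhaseIterator;

    final class TwoPhaseContract {
      // How consumers react when twoPhaseIterator() returns null (Scorer's default).
      static DocIdSetIterator iteratorFor(Scorer scorer) {
        TwoPhaseIterator tpi = scorer.twoPhaseIterator();
        if (tpi != null) {
          return tpi.approximation(); // candidate docs; tpi.matches() must confirm each one
        }
        return scorer.iterator();     // exact iteration, no second confirmation phase
      }
    }
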
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestEdisMaxSolrFeature.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestEdisMaxSolrFeature.java
index f9401eb..bbc8815 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestEdisMaxSolrFeature.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestEdisMaxSolrFeature.java
@@ -21,7 +21,6 @@
 import org.apache.solr.ltr.model.LinearModel;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestEdisMaxSolrFeature extends TestRerankBase {
@@ -75,7 +74,6 @@
     assertJQ("/query" + query.toQueryString(), "/response/numFound/==4");
   }
 
-  @Ignore("SOLR-15071")
   @Test
   public void testEdisMaxSolrFeatureCustomMM() throws Exception {
     loadFeature(
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestFeature.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestFeature.java
index 15fc40d..6c4dc08 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestFeature.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/feature/TestFeature.java
@@ -30,6 +30,9 @@
     for (final Method scorerClassMethod : Scorer.class.getDeclaredMethods()) {
       try {
 
+        // the FilterFeatureScorer may simply inherit Scorer's default implementation
+        if (scorerClassMethod.getName().equals("twoPhaseIterator")) continue;
+
         // the FilterFeatureScorer's implementation does not influence its parent Weight
         if (scorerClassMethod.getName().equals("getWeight")) continue;
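
The skip added above is needed because TestFeature checks, via reflection, that FilterFeatureScorer re-declares every Scorer method it is supposed to delegate; any method intentionally left to Scorer's default must be excluded or the check fails. A self-contained sketch of that guard pattern (hypothetical class name, not part of this patch):

    import java.lang.reflect.Method;
    import java.lang.reflect.Modifier;
    import org.apache.lucene.search.Scorer;

    final class DelegationCheck {
      // Fails with NoSuchMethodException when filterClass silently inherits a
      // Scorer method instead of declaring its own delegating override.
      static void assertDelegates(Class<? extends Scorer> filterClass) throws NoSuchMethodException {
        for (Method m : Scorer.class.getDeclaredMethods()) {
          if (m.getName().equals("twoPhaseIterator")) continue; // intentionally inherited, see SOLR-15071
          if (m.getName().equals("getWeight")) continue;        // does not influence the parent Weight
          if (Modifier.isStatic(m.getModifiers()) || Modifier.isFinal(m.getModifiers())) continue;
          filterClass.getDeclaredMethod(m.getName(), m.getParameterTypes());
        }
      }
    }
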
 
diff --git a/solr/contrib/scripting/README.md b/solr/contrib/scripting/README.md
deleted file mode 100644
index 3436a65..0000000
--- a/solr/contrib/scripting/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-Welcome to Apache Solr Scripting!
-===============================
-
-# Introduction
-
-The Scripting contrib module pulls together various scripting related functions.  
-
-Today, the ScriptUpdateProcessorFactory allows Java scripting engines to support scripts written in languages such as JavaScript, Ruby, Python, and Groovy to be used during Solr document update processing, allowing dramatic flexibility in expressing custom document processing before being indexed.  It exposes hooks for commit, delete, etc, but add is the most common usage.  It is implemented as an UpdateProcessor to be placed in an UpdateChain.
-
-## Getting Started
-
-For information on how to get started please see:
- * [Solr Reference Guide's section on Update Request Processors](https://lucene.apache.org/solr/guide/update-request-processors.html)
-  * [Solr Reference Guide's section on ScriptUpdateProcessorFactory](https://lucene.apache.org/solr/guide/script-update-processor.html)
diff --git a/solr/contrib/scripting/src/java/overview.html b/solr/contrib/scripting/src/java/overview.html
deleted file mode 100644
index 82eb0f8..0000000
--- a/solr/contrib/scripting/src/java/overview.html
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<body>
-Apache Solr Search Server: Scripting contrib
-
-<p>
-This package provides an Update Processor that allows for Java scripting engines
-to be used during the Solr document update processing.
-</p>
-</body>
-</html>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/invalid.script.xml b/solr/contrib/scripting/src/test-files/solr/collection1/conf/invalid.script.xml
deleted file mode 100644
index c8455af..0000000
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/invalid.script.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- This file is designed to test that loading a .xml file into
-     a script engine that is configured for JavaScript properly raises
-     an exception.
-
--->
-
-<foo version="1.0">
-  <bar/>
-</foo>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/schema.xml b/solr/contrib/scripting/src/test-files/solr/collection1/conf/schema.xml
deleted file mode 100644
index 140f12b..0000000
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/schema.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- The Solr schema file. This file should be named "schema.xml" and
-     should be located where the classloader for the Solr webapp can find it.
-
-     This schema is used for testing, and as such has everything and the
-     kitchen sink thrown in. See example/solr/conf/schema.xml for a
-     more concise example.
-
-  -->
-
-<schema name="test" version="1.6">
-
-  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="double" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-
-  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
-  <!-- solr.TextField allows the specification of custom
-       text analyzers specified as a tokenizer and a list
-       of token filters.
-    -->
-  <fieldType name="text" class="solr.TextField">
-    <analyzer>
-      <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.StopFilterFactory"/>
-      <filter class="solr.PorterStemFilterFactory"/>
-    </analyzer>
-  </fieldType>
-
-  <fieldType name="nametext" class="solr.TextField">
-    <analyzer class="org.apache.lucene.analysis.core.WhitespaceAnalyzer"/>
-  </fieldType>
-
-
-  <field name="id" type="string" indexed="true" stored="true" multiValued="false" required="false"/>
-  <field name="name" type="nametext" indexed="true" stored="true"/>
-  <field name="subject" type="text" indexed="true" stored="true"/>
-
-
-  <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
-       will be used if the name matches any of the patterns.
-       RESTRICTION: the glob-like pattern in the name attribute must have
-       a "*" only at the start or the end.
-       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
-       Longer patterns will be matched first.  if equal size patterns
-       both match, the first appearing in the schema will be used.
-  -->
-  <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
-  <dynamicField name="*_d" type="double" indexed="true" stored="true"/>
-  <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
-  <dynamicField name="*_sm" type="string" indexed="true" stored="true" multiValued="true"/>
-
-  <uniqueKey>id</uniqueKey>
-
-
-</schema>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml b/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
deleted file mode 100644
index afa5a7c..0000000
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--
-   Test Config for ScriptUpdateProcessorFactory
-
-  -->
-<config>
-  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
-  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
-  <requestHandler name="/select" class="solr.SearchHandler"></requestHandler>
-  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
-  <schemaFactory class="ClassicIndexSchemaFactory"/>
-
-  <updateRequestProcessorChain name="force-script-engine" default="true">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="engine">javascript</str>
-      <str name="script">missleading.extension.updateprocessor.js.txt</str>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="run-no-scripts">
-    <!-- for bypassing all scripts -->
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="single-script">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">trivial.updateprocessor0.js</str>
-      <lst name="params">
-        <bool name="boolValue">true</bool>
-        <int name="intValue">1</int>
-      </lst>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="dual-scripts-arr">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <arr name="script">
-        <str>trivial.updateprocessor0.js</str>
-        <str>trivial.updateprocessor1.js</str>
-      </arr>
-      <lst name="params">
-        <bool name="boolValue">true</bool>
-        <int name="intValue">1</int>
-      </lst>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="dual-scripts-strs">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">trivial.updateprocessor0.js</str>
-      <str name="script">trivial.updateprocessor1.js</str>
-      <lst name="params">
-        <bool name="boolValue">true</bool>
-        <int name="intValue">1</int>
-      </lst>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="conditional-scripts">
-    <!-- multiple scripts,
-         test that the first one can conditionally stop execution -->
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">conditional.updateprocessor.js</str>
-      <str name="script">addfields.updateprocessor.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="conditional-script">
-    <!-- single script, followed by another processor
-         (that happens to be a script).
-         test that the first one can conditionally stop execution -->
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">conditional.updateprocessor.js</str>
-    </processor>
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">addfields.updateprocessor.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="error-on-add">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">throw.error.on.add.updateprocessor.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="missing-functions">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">missing.functions.updateprocessor.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="javascript-compatibility">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">cross-compatible.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-  <updateRequestProcessorChain name="evil">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">evil.js</str>
-    </processor>
-  </updateRequestProcessorChain>
-
-</config>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml b/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
deleted file mode 100644
index de5c714..0000000
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!-- 
-
-A solrconfig.xml snippet containing indexConfig settings for randomized testing.
-
--->
-<indexConfig>
-  <!-- this sys property is not set by SolrTestCaseJ4 because we ideally want to use
-       the RandomMergePolicy in all tests - but some tests expect very specific
-       Merge behavior, so those tests can set it as needed.
-  -->
-  <mergePolicyFactory class="${solr.tests.mergePolicyFactory:org.apache.solr.util.RandomMergePolicyFactory}" />
-  
-  <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
-
-  <maxBufferedDocs>${solr.tests.maxBufferedDocs}</maxBufferedDocs>
-  <ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
-  <maxCommitMergeWaitTime>${solr.tests.maxCommitMergeWaitTime:-1}</maxCommitMergeWaitTime>
-  <ramPerThreadHardLimitMB>${solr.tests.ramPerThreadHardLimitMB}</ramPerThreadHardLimitMB>
-
-  <mergeScheduler class="${solr.tests.mergeScheduler}" />
-
-  <writeLockTimeout>1000</writeLockTimeout>
-  <commitLockTimeout>10000</commitLockTimeout>
-
-  <!-- this sys property is not set by SolrTestCaseJ4 because almost all tests should
-       use the single process lockType for speed - but tests that explicitly need
-       to vary the lockType canset it as needed.
-  -->
-  <lockType>${solr.tests.lockType:single}</lockType>
-
-  <infoStream>${solr.tests.infostream:false}</infoStream>
-
-</indexConfig>
diff --git a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/TestBadScriptingUpdateProcessorConfig.java b/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/TestBadScriptingUpdateProcessorConfig.java
deleted file mode 100644
index 9d64e8d..0000000
--- a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/TestBadScriptingUpdateProcessorConfig.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.scripting.update;
-
-import javax.script.ScriptEngineManager;
-
-import org.apache.solr.core.AbstractBadConfigTestBase;
-import org.junit.Assume;
-
-public class TestBadScriptingUpdateProcessorConfig extends AbstractBadConfigTestBase {
-
-
-  public void testBogusScriptEngine() throws Exception {
-    // sanity check
-    Assume.assumeTrue(null == (new ScriptEngineManager()).getEngineByName("giberish"));
-
-    assertConfigs("bad-solrconfig-bogus-scriptengine-name.xml",
-                  "schema.xml","giberish");
-  }
-
-  public void testMissingScriptFile() throws Exception {
-    // sanity check
-    Assume.assumeNotNull((new ScriptEngineManager()).getEngineByExtension("js"));
-    assertConfigs("bad-solrconfig-missing-scriptfile.xml",
-                  "schema.xml","a-file-name-that-does-not-exist.js");
-  }
-
-  public void testInvalidScriptFile() throws Exception {
-    // sanity check
-    Assume.assumeNotNull((new ScriptEngineManager()).getEngineByName("javascript"));
-    assertConfigs("bad-solrconfig-invalid-scriptfile.xml",
-                  "schema.xml","invalid.script.xml");
-  }
-
-}
diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
index 448f455..01592ff 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java
@@ -29,7 +29,6 @@
 import java.util.Set;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
 import org.apache.solr.cloud.overseer.CollectionMutator;
 import org.apache.solr.cloud.overseer.SliceMutator;
@@ -40,6 +39,7 @@
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,8 +74,8 @@
   ExclusiveSliceProperty(ClusterState clusterState, ZkNodeProps message) {
     this.clusterState = clusterState;
     String tmp = message.getStr(ZkStateReader.PROPERTY_PROP);
-    if (StringUtils.startsWith(tmp, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      tmp = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + tmp;
+    if (!StringUtils.startsWith(tmp, CollectionAdminParams.PROPERTY_PREFIX)) {
+      tmp = CollectionAdminParams.PROPERTY_PREFIX + tmp;
     }
     this.property = tmp.toLowerCase(Locale.ROOT);
     collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
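
The constant swap above is behavior-preserving: CollectionAdminParams.PROPERTY_PREFIX carries the same "property." value as the removed OverseerCollectionMessageHandler.COLL_PROP_PREFIX (see its declaration further down in this patch), and call sites keep the usual normalize-then-lowercase idiom. Sketched here as a hypothetical helper, not part of this patch:

    import java.util.Locale;
    import org.apache.solr.common.params.CollectionAdminParams;

    final class PropertyNames {
      // Prepend "property." when absent, then lowercase, mirroring ExclusiveSliceProperty above.
      static String normalize(String name) {
        String prefix = CollectionAdminParams.PROPERTY_PREFIX; // "property."
        String normalized = name.startsWith(prefix) ? name : prefix + name;
        return normalized.toLowerCase(Locale.ROOT);
      }
    }
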
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
index 52d35c7..82bfe69 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/AddReplicaCmd.java
@@ -45,7 +45,6 @@
 import org.apache.solr.cloud.ActiveReplicaWatcher;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
-import org.apache.solr.cluster.placement.PlacementPlugin;
 import org.apache.solr.common.SolrCloseableLatch;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
@@ -58,6 +57,7 @@
 import org.apache.solr.common.params.*;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -138,7 +138,7 @@
     }
 
     List<CreateReplica> createReplicas = buildReplicaPositions(ocmh.cloudManager, clusterState, collectionName, message, replicaTypesVsCount,
-        ocmh.overseer.getCoreContainer().getPlacementPluginFactory().createPluginInstance())
+        ocmh.overseer.getCoreContainer())
           .stream()
           .map(replicaPosition -> assignReplicaDetails(ocmh.cloudManager, clusterState, message, replicaPosition))
           .collect(Collectors.toList());
@@ -299,7 +299,7 @@
   public static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
                                                             String collectionName, ZkNodeProps message,
                                                             EnumMap<Replica.Type, Integer> replicaTypeVsCount,
-                                                            PlacementPlugin placementPlugin) throws IOException, InterruptedException {
+                                                            CoreContainer coreContainer) throws IOException, InterruptedException {
     boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);
     boolean skipNodeAssignment = message.getBool(CollectionAdminParams.SKIP_NODE_ASSIGNMENT, false);
     String sliceName = message.getStr(SHARD_ID_PROP);
@@ -323,7 +323,7 @@
     if (!skipCreateReplicaInClusterState && !skipNodeAssignment) {
 
       positions = Assign.getNodesForNewReplicas(clusterState, collection.getName(), sliceName, numNrtReplicas,
-                    numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager, placementPlugin);
+                    numTlogReplicas, numPullReplicas, createNodeSetStr, cloudManager, coreContainer);
     }
 
     if (positions == null)  {
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 01d4ec1..ea9ca6e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -51,6 +51,7 @@
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.util.NumberUtils;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
@@ -270,7 +271,7 @@
   public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
                                                           String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
                                                           Object createNodeSet, SolrCloudManager cloudManager,
-                                                          PlacementPlugin placementPlugin) throws IOException, InterruptedException, AssignmentException {
+                                                          CoreContainer coreContainer) throws IOException, InterruptedException, AssignmentException {
     log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}"
         , shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet);
     DocCollection coll = clusterState.getCollection(collectionName);
@@ -296,7 +297,7 @@
         .assignPullReplicas(pullReplicas)
         .onNodes(createNodeList)
         .build();
-    AssignStrategy assignStrategy = createAssignStrategy(placementPlugin, clusterState, coll);
+    AssignStrategy assignStrategy = createAssignStrategy(coreContainer, clusterState, coll);
     return assignStrategy.assign(cloudManager, assignRequest);
   }
 
@@ -379,9 +380,46 @@
     }
   }
 
+  /**
+   * Strategy for assigning replicas to nodes.
+   */
   public interface AssignStrategy {
+
+    /**
+     * Assign new replicas to nodes.
+     * @param solrCloudManager current instance of {@link SolrCloudManager}.
+     * @param assignRequest assign request.
+     * @return list of {@link ReplicaPosition}-s for new replicas.
+     * @throws AssignmentException when assignment request cannot produce any valid assignments.
+     */
     List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, AssignRequest assignRequest)
-        throws Assign.AssignmentException, IOException, InterruptedException;
+        throws AssignmentException, IOException, InterruptedException;
+
+    /**
+     * Verify that deleting a collection doesn't violate the replica assignment constraints.
+     * @param solrCloudManager current instance of {@link SolrCloudManager}.
+     * @param collection collection to delete.
+     * @throws AssignmentException when deleting the collection would violate replica assignment constraints.
+     * @throws IOException on general errors.
+     */
+    default void verifyDeleteCollection(SolrCloudManager solrCloudManager, DocCollection collection)
+        throws AssignmentException, IOException, InterruptedException {
+
+    }
+
+    /**
+     * Verify that deleting these replicas doesn't violate the replica assignment constraints.
+     * @param solrCloudManager current instance of {@link SolrCloudManager}.
+     * @param collection collection to delete replicas from.
+     * @param shardName shard name.
+     * @param replicas replicas to delete.
+     * @throws AssignmentException when deleting the replicas would violate replica assignment constraints.
+     * @throws IOException on general errors.
+     */
+    default void verifyDeleteReplicas(SolrCloudManager solrCloudManager, DocCollection collection, String shardName, Set<Replica> replicas)
+        throws AssignmentException, IOException, InterruptedException {
+
+    }
   }
 
   public static class AssignRequest {
@@ -495,7 +533,8 @@
    * <p>If {@link PlacementPlugin} instance is null this call will return {@link LegacyAssignStrategy}, otherwise
    * {@link PlacementPluginAssignStrategy} will be used.</p>
    */
-  public static AssignStrategy createAssignStrategy(PlacementPlugin placementPlugin, ClusterState clusterState, DocCollection collection) {
+  public static AssignStrategy createAssignStrategy(CoreContainer coreContainer, ClusterState clusterState, DocCollection collection) {
+    PlacementPlugin placementPlugin = coreContainer.getPlacementPluginFactory().createPluginInstance();
     if (placementPlugin != null) {
       // If a cluster wide placement plugin is configured (and that's the only way to define a placement plugin)
       return new PlacementPluginAssignStrategy(collection, placementPlugin);
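
The new default hooks let a strategy veto deletions without forcing every implementation to care; verifyDeleteCollection() works the same way at collection granularity. A minimal sketch of a vetoing strategy, assuming only the AssignStrategy API added above (hypothetical class, not part of this patch):

    import java.io.IOException;
    import java.util.List;
    import java.util.Set;
    import org.apache.solr.client.solrj.cloud.SolrCloudManager;
    import org.apache.solr.cloud.api.collections.Assign;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ReplicaPosition;
    import org.apache.solr.common.cloud.Slice;

    public class GuardedAssignStrategy implements Assign.AssignStrategy {
      private final Assign.AssignStrategy delegate;

      public GuardedAssignStrategy(Assign.AssignStrategy delegate) {
        this.delegate = delegate;
      }

      @Override
      public List<ReplicaPosition> assign(SolrCloudManager cloudManager, Assign.AssignRequest request)
          throws Assign.AssignmentException, IOException, InterruptedException {
        return delegate.assign(cloudManager, request); // placement itself is unchanged
      }

      @Override
      public void verifyDeleteReplicas(SolrCloudManager cloudManager, DocCollection collection,
                                       String shardName, Set<Replica> replicas)
          throws Assign.AssignmentException, IOException, InterruptedException {
        Slice slice = collection.getSlice(shardName);
        // refuse to delete the last remaining copies of a shard
        if (slice != null && slice.getReplicas().size() <= replicas.size()) {
          throw new Assign.AssignmentException(
              "deleting " + replicas.size() + " replica(s) would empty shard " + shardName);
        }
      }
    }
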
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index 2e2a06c..d9de9a5 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -41,7 +41,6 @@
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
 import org.apache.solr.cloud.overseer.ClusterStateMutator;
-import org.apache.solr.cluster.placement.PlacementPlugin;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.Aliases;
@@ -63,6 +62,7 @@
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.component.ShardHandler;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.util.TimeOut;
@@ -169,8 +169,8 @@
 
       List<ReplicaPosition> replicaPositions = null;
       try {
-        replicaPositions = buildReplicaPositions(ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName),
-            message, shardNames, ocmh.overseer.getCoreContainer().getPlacementPluginFactory().createPluginInstance());
+        replicaPositions = buildReplicaPositions(ocmh.overseer.getCoreContainer(), ocmh.cloudManager, clusterState, clusterState.getCollection(collectionName),
+            message, shardNames);
       } catch (Assign.AssignmentException e) {
         ZkNodeProps deleteMessage = new ZkNodeProps("name", collectionName);
         new DeleteCollectionCmd(ocmh).call(clusterState, deleteMessage, results);
@@ -288,10 +288,10 @@
     }
   }
 
-  private static List<ReplicaPosition> buildReplicaPositions(SolrCloudManager cloudManager, ClusterState clusterState,
+  private static List<ReplicaPosition> buildReplicaPositions(CoreContainer coreContainer, SolrCloudManager cloudManager, ClusterState clusterState,
                                                              DocCollection docCollection,
                                                              ZkNodeProps message,
-                                                             List<String> shardNames, PlacementPlugin placementPlugin) throws IOException, InterruptedException, Assign.AssignmentException {
+                                                             List<String> shardNames) throws IOException, InterruptedException, Assign.AssignmentException {
     final String collectionName = message.getStr(NAME);
     // look at the replication factor and see if it matches reality
     // if it does not, find best nodes to create more cores
@@ -330,7 +330,7 @@
           .assignPullReplicas(numPullReplicas)
           .onNodes(nodeList)
           .build();
-      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(placementPlugin, clusterState, docCollection);
+      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(coreContainer, clusterState, docCollection);
       replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
     }
     return replicaPositions;
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
index d9b6679..8c0a1e4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteCollectionCmd.java
@@ -92,6 +92,13 @@
       collection = extCollection;
     }
 
+    // verify the placement modifications caused by the deletion are allowed
+    DocCollection coll = state.getCollectionOrNull(collection);
+    if (coll != null) {
+      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.overseer.getCoreContainer(), state, coll);
+      assignStrategy.verifyDeleteCollection(ocmh.cloudManager, coll);
+    }
+
     final boolean deleteHistory = message.getBool(CoreAdminParams.DELETE_METRICS_HISTORY, true);
 
     boolean removeCounterNode = true;
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
index 19865d3..c69675b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteNodeCmd.java
@@ -18,6 +18,7 @@
 package org.apache.solr.cloud.api.collections;
 
 
+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
@@ -98,7 +99,7 @@
                               List<ZkNodeProps> sourceReplicas,
                               OverseerCollectionMessageHandler ocmh,
                               String node,
-                              String async) throws InterruptedException {
+                              String async) throws IOException, InterruptedException {
     CountDownLatch cleanupLatch = new CountDownLatch(sourceReplicas.size());
     for (ZkNodeProps sourceReplica : sourceReplicas) {
       String coll = sourceReplica.getStr(COLLECTION_PROP);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
index 4d7975d..4c5f757 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/DeleteReplicaCmd.java
@@ -23,6 +23,7 @@
 import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
 import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
 
+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -31,6 +32,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.stream.Collectors;
 
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.Cmd;
 import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
@@ -69,7 +71,7 @@
 
   @SuppressWarnings("unchecked")
   void deleteReplica(ClusterState clusterState, ZkNodeProps message, @SuppressWarnings({"rawtypes"})NamedList results, Runnable onComplete)
-          throws KeeperException, InterruptedException {
+          throws KeeperException, IOException, InterruptedException {
     if (log.isDebugEnabled()) {
       log.debug("deleteReplica() : {}", Utils.toJSONString(message));
     }
@@ -101,9 +103,7 @@
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
               "Invalid shard name : " +  shard + " in collection : " +  collectionName);
     }
-
-    deleteCore(slice, collectionName, replicaName, message, shard, results, onComplete,  parallel);
-
+    deleteCore(clusterState, coll, shard, replicaName, message, results, onComplete, parallel, true);
   }
 
 
@@ -117,7 +117,7 @@
                                  @SuppressWarnings({"rawtypes"})NamedList results,
                                  Runnable onComplete,
                                  boolean parallel)
-          throws KeeperException, InterruptedException {
+          throws KeeperException, IOException, InterruptedException {
     ocmh.checkRequired(message, COLLECTION_PROP, COUNT_PROP);
     int count = Integer.parseInt(message.getStr(COUNT_PROP));
     String collectionName = message.getStr(COLLECTION_PROP);
@@ -147,6 +147,17 @@
       }
     }
 
+    // verify that all replicas can be deleted
+    Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.overseer.getCoreContainer(), clusterState, coll);
+    for (Map.Entry<Slice, Set<String>> entry : shardToReplicasMapping.entrySet()) {
+      Slice shardSlice = entry.getKey();
+      String shardId = shardSlice.getName();
+      Set<String> replicaNames = entry.getValue();
+      Set<Replica> replicas = replicaNames.stream()
+          .map(name -> shardSlice.getReplica(name)).collect(Collectors.toSet());
+      assignStrategy.verifyDeleteReplicas(ocmh.cloudManager, coll, shardId, replicas);
+    }
+
     for (Map.Entry<Slice, Set<String>> entry : shardToReplicasMapping.entrySet()) {
       Slice shardSlice = entry.getKey();
       String shardId = shardSlice.getName();
@@ -154,7 +165,8 @@
       //callDeleteReplica on all replicas
       for (String replica: replicas) {
         log.debug("Deleting replica {}  for shard {} based on count {}", replica, shardId, count);
-        deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
+        // don't verify with the placement plugin; the whole batch was already verified above
+        deleteCore(clusterState, coll, shardId, replica, message, results, onComplete, parallel, false);
       }
       results.add("shard_id", shardId);
       results.add("replicas_deleted", replicas);
@@ -212,25 +224,39 @@
   }
 
   @SuppressWarnings({"unchecked"})
-  void deleteCore(Slice slice, String collectionName, String replicaName,ZkNodeProps message, String shard, @SuppressWarnings({"rawtypes"})NamedList results, Runnable onComplete, boolean parallel) throws KeeperException, InterruptedException {
+  void deleteCore(ClusterState clusterState, DocCollection coll,
+                  String shardId,
+                  String replicaName,
+                  ZkNodeProps message,
+                  @SuppressWarnings({"rawtypes"})NamedList results,
+                  Runnable onComplete,
+                  boolean parallel,
+                  boolean verifyPlacement) throws KeeperException, IOException, InterruptedException {
 
+    Slice slice = coll.getSlice(shardId);
     Replica replica = slice.getReplica(replicaName);
     if (replica == null) {
       ArrayList<String> l = new ArrayList<>();
       for (Replica r : slice.getReplicas())
         l.add(r.getName());
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid replica : " +  replicaName + " in shard/collection : " +
-              shard  + "/" + collectionName + " available replicas are " +  StrUtils.join(l, ','));
+              shardId  + "/" + coll.getName() + " available replicas are " +  StrUtils.join(l, ','));
     }
 
     // If users are being safe and only want to remove a shard if it is down, they can specify onlyIfDown=true
     // on the command.
     if (Boolean.parseBoolean(message.getStr(OverseerCollectionMessageHandler.ONLY_IF_DOWN)) && replica.getState() != Replica.State.DOWN) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-              "Attempted to remove replica : " + collectionName + "/"  + shard + "/" + replicaName +
+              "Attempted to remove replica : " + coll.getName() + "/"  + shardId + "/" + replicaName +
               " with onlyIfDown='true', but state is '" + replica.getStr(ZkStateReader.STATE_PROP) + "'");
     }
 
+    // verify that we are allowed to delete this replica
+    if (verifyPlacement) {
+      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.overseer.getCoreContainer(), clusterState, coll);
+      assignStrategy.verifyDeleteReplicas(ocmh.cloudManager, coll, shardId, Set.of(replica));
+    }
+
     ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
     String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
     String asyncId = message.getStr(ASYNC);
@@ -256,12 +282,12 @@
           shardRequestTracker.processResponses(results, shardHandler, false, null);
 
           //check if the core unload removed the corenode zk entry
-          if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
+          if (ocmh.waitForCoreNodeGone(coll.getName(), shardId, replicaName, 30000)) return Boolean.TRUE;
         }
 
         // try and ensure core info is removed from cluster state
-        ocmh.deleteCoreNode(collectionName, replicaName, replica, core);
-        if (ocmh.waitForCoreNodeGone(collectionName, shard, replicaName, 30000)) return Boolean.TRUE;
+        ocmh.deleteCoreNode(coll.getName(), replicaName, replica, core);
+        if (ocmh.waitForCoreNodeGone(coll.getName(), shardId, replicaName, 30000)) return Boolean.TRUE;
         return Boolean.FALSE;
       } catch (Exception e) {
         results.add("failure", "Could not complete delete " + e.getMessage());
@@ -275,7 +301,7 @@
       try {
         if (!callable.call())
           throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                  "Could not remove replica : " + collectionName + "/" + shard + "/" + replicaName);
+                  "Could not remove replica : " + coll.getName() + "/" + shardId + "/" + replicaName);
       } catch (InterruptedException | KeeperException e) {
         throw e;
       } catch (Exception ex) {
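
To avoid double-checking, DeleteReplicaCmd now validates placements once per request: the count-based path verifies each shard's whole batch up front and then calls deleteCore() with verifyPlacement=false, while the single-replica path passes true and deleteCore() verifies a singleton set. A sketch of the batch-side check, assuming the AssignStrategy API introduced in this patch (hypothetical helper, not part of it):

    import java.io.IOException;
    import java.util.Set;
    import java.util.stream.Collectors;
    import org.apache.solr.client.solrj.cloud.SolrCloudManager;
    import org.apache.solr.cloud.api.collections.Assign;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.Slice;

    final class PlacementVerification {
      // Verify a whole batch once; callers may then delete each replica with
      // verifyPlacement=false, as the count-based path above does.
      static void verifyBatch(Assign.AssignStrategy strategy, SolrCloudManager cloudManager,
                              DocCollection coll, Slice slice, Set<String> replicaNames)
          throws Assign.AssignmentException, IOException, InterruptedException {
        Set<Replica> replicas = replicaNames.stream()
            .map(slice::getReplica)
            .collect(Collectors.toSet());
        strategy.verifyDeleteReplicas(cloudManager, coll, slice.getName(), replicas);
      }
    }
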
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
index 20ec262..919b7b9 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/MigrateCmd.java
@@ -304,7 +304,7 @@
     props.put(CoreAdminParams.NAME, tempCollectionReplica2);
     // copy over property params:
     for (String key : message.keySet()) {
-      if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+      if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
         props.put(key, message.getStr(key));
       }
     }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
index 8c55fd3..5949055 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/OverseerCollectionMessageHandler.java
@@ -127,8 +127,6 @@
 
   public static final String REQUESTID = "requestid";
 
-  public static final String COLL_PROP_PREFIX = "property.";
-
   public static final String ONLY_IF_DOWN = "onlyIfDown";
 
   public static final String SHARD_UNIQUE = "shardUnique";
@@ -561,7 +559,7 @@
   void addPropertyParams(ZkNodeProps message, ModifiableSolrParams params) {
     // Now add the property.key=value pairs
     for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
+      if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
         params.set(key, message.getStr(key));
       }
     }
@@ -570,7 +568,7 @@
   void addPropertyParams(ZkNodeProps message, Map<String, Object> map) {
     // Now add the property.key=value pairs
     for (String key : message.keySet()) {
-      if (key.startsWith(COLL_PROP_PREFIX)) {
+      if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
         map.put(key, message.getStr(key));
       }
     }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
index 271677f..7140946 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -121,7 +121,7 @@
               .onNodes(new ArrayList<>(ocmh.cloudManager.getClusterStateProvider().getLiveNodes()))
               .build();
           Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(
-              ocmh.overseer.getCoreContainer().getPlacementPluginFactory().createPluginInstance(),
+              ocmh.overseer.getCoreContainer(),
               clusterState, clusterState.getCollection(sourceCollection));
           targetNode = assignStrategy.assign(ocmh.cloudManager, assignRequest).get(0).node;
         }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
index 0b6dffc..f6c43b7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -230,7 +230,7 @@
             .onNodes(nodeList)
             .build();
     Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(
-        ocmh.overseer.getCoreContainer().getPlacementPluginFactory().createPluginInstance(),
+        ocmh.overseer.getCoreContainer(),
         clusterState, restoreCollection);
     List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index d4d0632..7609123 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -30,6 +30,7 @@
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.*;
 import org.apache.solr.common.cloud.rule.ImplicitSnitch;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.params.CommonAdminParams;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.CoreAdminParams;
@@ -304,7 +305,7 @@
         propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
         // copy over property params:
         for (String key : message.keySet()) {
-          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+          if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
             propMap.put(key, message.getStr(key));
           }
         }
@@ -435,7 +436,7 @@
           .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
           .build();
       Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(
-          ocmh.overseer.getCoreContainer().getPlacementPluginFactory().createPluginInstance(),
+          ocmh.overseer.getCoreContainer(),
           clusterState, collection);
       List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
       t.stop();
@@ -472,7 +473,7 @@
         propMap.put(CoreAdminParams.NAME, solrCoreName);
         // copy over property params:
         for (String key : message.keySet()) {
-          if (key.startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+          if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
             propMap.put(key, message.getStr(key));
           }
         }
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
index 78857af..c1cbddb 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java
@@ -46,6 +46,7 @@
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.TestInjection;
 import org.slf4j.Logger;
@@ -121,8 +122,8 @@
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
+    if (!StringUtils.startsWith(property, CollectionAdminParams.PROPERTY_PREFIX)) {
+      property = CollectionAdminParams.PROPERTY_PREFIX + property;
     }
     property = property.toLowerCase(Locale.ROOT);
     String propVal = message.getStr(ZkStateReader.PROPERTY_VALUE_PROP);
@@ -186,8 +187,8 @@
     String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
     String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
     String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
-    if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
-      property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
+    if (!StringUtils.startsWith(property, CollectionAdminParams.PROPERTY_PREFIX)) {
+      property = CollectionAdminParams.PROPERTY_PREFIX + property;
     }
 
     DocCollection collection = clusterState.getCollection(collectionName);
@@ -319,7 +320,7 @@
         replicaProps.put(ZkStateReader.REPLICA_TYPE, oldReplica.getType().toString());
         // Move custom props over.
         for (Map.Entry<String, Object> ent : oldReplica.getProperties().entrySet()) {
-          if (ent.getKey().startsWith(OverseerCollectionMessageHandler.COLL_PROP_PREFIX)) {
+          if (ent.getKey().startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
             replicaProps.put(ent.getKey(), ent.getValue());
           }
         }
diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
index b5bf512..f8ecd69 100644
--- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
+++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java
@@ -28,7 +28,6 @@
 import org.apache.solr.client.solrj.impl.SolrClientCloudManager;
 import org.apache.solr.cloud.Overseer;
 import org.apache.solr.cloud.api.collections.Assign;
-import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.PerReplicaStatesOps;
@@ -40,6 +39,7 @@
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionAdminParams;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,7 +49,7 @@
 public class SliceMutator {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final String PREFERRED_LEADER_PROP = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + "preferredleader";
+  public static final String PREFERRED_LEADER_PROP = CollectionAdminParams.PROPERTY_PREFIX + "preferredleader";
 
   public static final Set<String> SLICE_UNIQUE_BOOLEAN_PROPERTIES = ImmutableSet.of(PREFERRED_LEADER_PROP);
 
diff --git a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
index 8984d1d..be4533b 100644
--- a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
+++ b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
@@ -41,8 +41,6 @@
 import org.apache.solr.cluster.events.ClusterEvent;
 import org.apache.solr.cluster.events.ClusterEventListener;
 import org.apache.solr.cluster.events.NodesDownEvent;
-import org.apache.solr.cluster.placement.PlacementPluginConfig;
-import org.apache.solr.cluster.placement.PlacementPluginFactory;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
@@ -72,18 +70,18 @@
 
   private final SolrClient solrClient;
   private final SolrCloudManager solrCloudManager;
+  private final CoreContainer cc;
 
   private State state = State.STOPPED;
 
   private int waitForSecond = DEFAULT_WAIT_FOR_SEC;
 
   private ScheduledThreadPoolExecutor waitForExecutor;
-  private final PlacementPluginFactory<? extends PlacementPluginConfig> placementPluginFactory;
 
   public CollectionsRepairEventListener(CoreContainer cc) {
+    this.cc = cc;
     this.solrClient = cc.getSolrClientCache().getCloudSolrClient(cc.getZkController().getZkClient().getZkServerAddress());
     this.solrCloudManager = cc.getZkController().getSolrCloudManager();
-    this.placementPluginFactory = cc.getPlacementPluginFactory();
   }
 
   @VisibleForTesting
@@ -169,7 +167,7 @@
                 .incrementAndGet();
           }
         });
-        Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(placementPluginFactory.createPluginInstance(), clusterState, coll);
+        Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(cc, clusterState, coll);
         lostReplicas.forEach((shard, types) -> {
           Assign.AssignRequestBuilder assignRequestBuilder = new Assign.AssignRequestBuilder()
               .forCollection(coll.getName())
diff --git a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteCollectionRequest.java
similarity index 84%
rename from solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
rename to solr/core/src/java/org/apache/solr/cluster/placement/DeleteCollectionRequest.java
index 5bf1066..fdd3f8b 100644
--- a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteCollectionRequest.java
@@ -14,8 +14,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.solr.cluster.placement;
 
 /**
- * Support for scripting during document updates.
+ * Delete collection request.
  */
-package org.apache.solr.scripting.update;
+public interface DeleteCollectionRequest extends ModificationRequest {
+}
diff --git a/solr/contrib/scripting/build.gradle b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteReplicasRequest.java
similarity index 76%
rename from solr/contrib/scripting/build.gradle
rename to solr/core/src/java/org/apache/solr/cluster/placement/DeleteReplicasRequest.java
index 32f4e1e..3ce755d 100644
--- a/solr/contrib/scripting/build.gradle
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteReplicasRequest.java
@@ -15,11 +15,15 @@
  * limitations under the License.
  */
 
-apply plugin: 'java-library'
+package org.apache.solr.cluster.placement;
 
-description = 'Scripting Package'
+import org.apache.solr.cluster.Replica;
 
-dependencies {
-  implementation project(':solr:core')
-  testImplementation project(':solr:test-framework')
+import java.util.Set;
+
+/**
+ * Delete replicas request.
+ */
+public interface DeleteReplicasRequest extends ModificationRequest {
+  Set<Replica> getReplicas();
 }
diff --git a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteShardsRequest.java
similarity index 80%
copy from solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
copy to solr/core/src/java/org/apache/solr/cluster/placement/DeleteShardsRequest.java
index 5bf1066..3638223 100644
--- a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/DeleteShardsRequest.java
@@ -15,7 +15,13 @@
  * limitations under the License.
  */
 
+package org.apache.solr.cluster.placement;
+
+import java.util.Set;
+
 /**
- * Support for scripting during document updates.
+ * Delete shards request.
  */
-package org.apache.solr.scripting.update;
+public interface DeleteShardsRequest extends ModificationRequest {
+  Set<String> getShardNames();
+}
diff --git a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java b/solr/core/src/java/org/apache/solr/cluster/placement/ModificationRequest.java
similarity index 75%
copy from solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
copy to solr/core/src/java/org/apache/solr/cluster/placement/ModificationRequest.java
index 5bf1066..4aed2ba 100644
--- a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/package-info.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/ModificationRequest.java
@@ -15,7 +15,16 @@
  * limitations under the License.
  */
 
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.SolrCollection;
+
 /**
- * Support for scripting during document updates.
+ * Collection modification request.
  */
-package org.apache.solr.scripting.update;
+public interface ModificationRequest {
+  /**
+   * The {@link SolrCollection} to modify.
+   */
+  SolrCollection getCollection();
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementContext.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementContext.java
new file mode 100644
index 0000000..617f6f6
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementContext.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Cluster;
+
+/**
+ * Placement context makes it easier to pass around and access main placement-related components.
+ */
+public interface PlacementContext {
+  /**
+   * Initial state of the cluster. Note there are {@link java.util.Set}'s and {@link java.util.Map}'s
+   * accessible from the {@link Cluster} and other reachable instances. These collections will not change
+   * while the plugin is executing and will be thrown away once the plugin is done. The plugin code can
+   * therefore modify them if needed.
+   */
+  Cluster getCluster();
+
+  /**
+   * Factory used by the plugin to fetch additional attributes from the cluster nodes, such as
+   * the count of cores, system properties, etc.
+   */
+  AttributeFetcher getAttributeFetcher();
+
+  /**
+   * Factory used to create instances of {@link PlacementPlan} to return the computed placement decision.
+   */
+  PlacementPlanFactory getPlacementPlanFactory();
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementModificationException.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementModificationException.java
new file mode 100644
index 0000000..f1a5ac2
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementModificationException.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cluster.placement;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Exception thrown when a placement modification is rejected by the placement plugin.
+ * Additional details about the reasons, when available, are provided
+ * by {@link #getRejectedModifications()} and included in the {@link #toString()} output.
+ */
+public class PlacementModificationException extends PlacementException {
+  private final Map<String, String> rejectedModifications = new HashMap<>();
+
+  public PlacementModificationException() {
+    super();
+  }
+
+  public PlacementModificationException(String message) {
+    super(message);
+  }
+
+  public PlacementModificationException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public PlacementModificationException(Throwable cause) {
+    super(cause);
+  }
+
+  /**
+   * Add information about a modification that caused this exception.
+   * @param modification requested modification details
+   * @param reason reason for rejection
+   */
+  public void addRejectedModification(String modification, String reason) {
+    rejectedModifications.put(modification, reason);
+  }
+
+  /**
+   * Return rejected modifications and reasons for rejections.
+   */
+  public Map<String, String> getRejectedModifications() {
+    return rejectedModifications;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(super.toString());
+    if (!rejectedModifications.isEmpty()) {
+      sb.append(": ")
+          .append(rejectedModifications.size())
+          .append(" rejections:");
+      rejectedModifications.forEach((modification, reason) ->
+          sb.append("\n")
+              .append(modification)
+              .append("\t")
+              .append(reason));
+    }
+    return sb.toString();
+  }
+}
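Note: a sketch of the intended use of this exception by a placement plugin, with
hypothetical values (the AffinityPlacementFactory changes below contain a real call site):

    PlacementModificationException exception =
        new PlacementModificationException("delete replica(s) rejected");
    // Record each individual rejection with a human-readable reason.
    exception.addRejectedModification("replica core_node5", "co-located with replicas of [primaryColl]");
    throw exception;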
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java
index bbb52cb..97d1957 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java
@@ -17,8 +17,6 @@
 
 package org.apache.solr.cluster.placement;
 
-import org.apache.solr.cluster.Cluster;
-
 /**
  * <p>Implemented by external plugins to control replica placement and movement on the search cluster (as well as other things
  * such as cluster elasticity?) when cluster changes are required (initiated elsewhere, most likely following a Collection
@@ -36,16 +34,21 @@
    *
    * <p>Configuration is passed upon creation of a new instance of this class by {@link PlacementPluginFactory#createPluginInstance}.
    *
-   * @param cluster              initial state of the cluster. Note there are {@link java.util.Set}'s and {@link java.util.Map}'s
-   *                             accessible from the {@link Cluster} and other reachable instances. These collection will not change
-   *                             while the plugin is executing and will be thrown away once the plugin is done. The plugin code can
-   *                             therefore modify them if needed.
    * @param placementRequest     request for placing new replicas or moving existing replicas on the cluster.
-   * @param attributeFetcher     Factory used by the plugin to fetch additional attributes from the cluster nodes, such as
-   *                             count of coresm ssytem properties etc..
-   * @param placementPlanFactory Factory used to create instances of {@link PlacementPlan} to return computed decision.
    * @return plan satisfying the placement request.
    */
-  PlacementPlan computePlacement(Cluster cluster, PlacementRequest placementRequest, AttributeFetcher attributeFetcher,
-                                 PlacementPlanFactory placementPlanFactory) throws PlacementException, InterruptedException;
+  PlacementPlan computePlacement(PlacementRequest placementRequest, PlacementContext placementContext) throws PlacementException, InterruptedException;
+
+  /**
+   * Verify that a collection layout modification doesn't violate constraints on replica placements
+   * required by this plugin. Default implementation is a no-op (any modifications are allowed).
+   * @param modificationRequest modification request.
+   * @param placementContext placement context.
+   * @throws PlacementModificationException if the requested modification would violate replica
+   * placement constraints.
+   */
+  default void verifyAllowedModification(ModificationRequest modificationRequest, PlacementContext placementContext)
+      throws PlacementModificationException, InterruptedException {
+  }
 }
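Note on the API change above: computePlacement() now takes two arguments instead of four,
with the cluster state, attribute fetcher and plan factory all reachable through
PlacementContext. A minimal sketch of a plugin written against the new signature follows
(the class name is hypothetical and not part of this change):

    package org.apache.solr.cluster.placement.plugins;

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.solr.cluster.placement.PlacementContext;
    import org.apache.solr.cluster.placement.PlacementException;
    import org.apache.solr.cluster.placement.PlacementPlan;
    import org.apache.solr.cluster.placement.PlacementPlugin;
    import org.apache.solr.cluster.placement.PlacementRequest;
    import org.apache.solr.cluster.placement.ReplicaPlacement;

    public class ExamplePlacementPlugin implements PlacementPlugin {
      @Override
      public PlacementPlan computePlacement(PlacementRequest request, PlacementContext placementContext)
          throws PlacementException, InterruptedException {
        // Everything that used to arrive as separate arguments is now on the context.
        Set<ReplicaPlacement> placements = new HashSet<>();
        // ... decide placements using placementContext.getCluster() and
        // placementContext.getAttributeFetcher() ...
        return placementContext.getPlacementPlanFactory().createPlacementPlan(request, placements);
      }
      // verifyAllowedModification() is not overridden: its new default implementation is a no-op.
    }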
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java
index 44222a2..0ece962 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java
@@ -30,12 +30,7 @@
  * <p>The set of {@link Node}s on which the replicas should be placed
  * is specified (defaults to being equal to the set returned by {@link Cluster#getLiveNodes()}).
  */
-public interface PlacementRequest {
-  /**
-   * The {@link SolrCollection} to add {@link Replica}(s) to.
-   */
-  SolrCollection getCollection();
-
+public interface PlacementRequest extends ModificationRequest {
   /**
    * <p>Shard name(s) for which new replicas placement should be computed. The shard(s) might exist or not (that's why this
    * method returns a {@link Set} of {@link String}'s and not directly a set of {@link Shard} instances).
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ModificationRequestImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ModificationRequestImpl.java
new file mode 100644
index 0000000..1bfdc3e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ModificationRequestImpl.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.Shard;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.DeleteCollectionRequest;
+import org.apache.solr.cluster.placement.DeleteReplicasRequest;
+import org.apache.solr.cluster.placement.DeleteShardsRequest;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Slice;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Helper class to create modification request instances.
+ */
+public class ModificationRequestImpl {
+
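+  /**
+   * Create a delete collection request.
+   * @param docCollection collection to delete
+   */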
+  public static DeleteCollectionRequest createDeleteCollectionRequest(DocCollection docCollection) {
+    SolrCollection solrCollection = SimpleClusterAbstractionsImpl.SolrCollectionImpl.fromDocCollection(docCollection);
+    return () -> solrCollection;
+  }
+
+  /**
+   * Create a delete replicas request.
+   * @param collection collection to delete replicas from
+   * @param replicas replicas to delete
+   */
+  public static DeleteReplicasRequest createDeleteReplicasRequest(SolrCollection collection, Set<Replica> replicas) {
+    return new DeleteReplicasRequest() {
+      @Override
+      public Set<Replica> getReplicas() {
+        return replicas;
+      }
+
+      @Override
+      public SolrCollection getCollection() {
+        return collection;
+      }
+
+      @Override
+      public String toString() {
+        return "DeleteReplicasRequest{collection=" + collection.getName() +
+            ",replicas=" + replicas;
+      }
+    };
+  }
+
+  /**
+   * Create a delete replicas request using the internal Solr API.
+   * @param docCollection Solr collection.
+   * @param shardName shard name.
+   * @param replicas Solr replicas (belonging to the shard).
+   */
+  public static DeleteReplicasRequest createDeleteReplicasRequest(DocCollection docCollection, String shardName, Set<org.apache.solr.common.cloud.Replica> replicas) {
+    SolrCollection solrCollection = SimpleClusterAbstractionsImpl.SolrCollectionImpl.fromDocCollection(docCollection);
+    Shard shard = solrCollection.getShard(shardName);
+    Slice slice = docCollection.getSlice(shardName);
+    Set<Replica> solrReplicas = new HashSet<>();
+    replicas.forEach(replica -> {
+      solrReplicas.add(shard.getReplica(replica.getName()));
+    });
+    return createDeleteReplicasRequest(solrCollection, solrReplicas);
+  }
+
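+  /**
+   * Create a delete shards request.
+   * @param collection collection to delete shards from
+   * @param shardNames shard names to delete
+   */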
+  public static DeleteShardsRequest createDeleteShardsRequest(SolrCollection collection, Set<String> shardNames) {
+    return new DeleteShardsRequest() {
+      @Override
+      public Set<String> getShardNames() {
+        return shardNames;
+      }
+
+      @Override
+      public SolrCollection getCollection() {
+        return collection;
+      }
+
+      @Override
+      public String toString() {
+        return "DeleteShardsRequest{collection=" + collection.getName() +
+            ",shards=" + shardNames;
+      }
+    };
+  }
+
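+  /**
+   * Create a delete shards request using the internal Solr API.
+   * @param docCollection Solr collection.
+   * @param shardNames shard names to delete.
+   */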
+  public static DeleteShardsRequest createDeleteShardsRequest(DocCollection docCollection, Set<String> shardNames) {
+    SolrCollection solrCollection = SimpleClusterAbstractionsImpl.SolrCollectionImpl.fromDocCollection(docCollection);
+    return createDeleteShardsRequest(solrCollection, shardNames);
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java
index c4c5667..32abe84 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java
@@ -19,15 +19,19 @@
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
 import org.apache.solr.cloud.api.collections.Assign;
-import org.apache.solr.cluster.Cluster;
 import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.DeleteCollectionRequest;
+import org.apache.solr.cluster.placement.DeleteReplicasRequest;
+import org.apache.solr.cluster.placement.PlacementContext;
 import org.apache.solr.cluster.placement.PlacementException;
 import org.apache.solr.cluster.placement.PlacementPlugin;
 import org.apache.solr.cluster.placement.PlacementPlan;
 import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.ReplicaPosition;
 
 /**
@@ -35,8 +39,6 @@
  */
 public class PlacementPluginAssignStrategy implements Assign.AssignStrategy {
 
-  private static final PlacementPlanFactoryImpl PLACEMENT_PLAN_FACTORY = new PlacementPlanFactoryImpl();
-
   private final PlacementPlugin plugin;
   private final DocCollection collection;
 
@@ -53,18 +55,40 @@
   public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, Assign.AssignRequest assignRequest)
       throws Assign.AssignmentException, IOException, InterruptedException {
 
-    Cluster cluster = new SimpleClusterAbstractionsImpl.ClusterImpl(solrCloudManager);
-    SolrCollection solrCollection = new SimpleClusterAbstractionsImpl.SolrCollectionImpl(collection);
+    PlacementContext placementContext = new SimplePlacementContextImpl(solrCloudManager);
+    SolrCollection solrCollection = placementContext.getCluster().getCollection(collection.getName());
 
-    PlacementRequestImpl placementRequest = PlacementRequestImpl.toPlacementRequest(cluster, solrCollection, assignRequest);
+    PlacementRequestImpl placementRequest = PlacementRequestImpl.toPlacementRequest(placementContext.getCluster(), solrCollection, assignRequest);
 
     final PlacementPlan placementPlan;
     try {
-      placementPlan = plugin.computePlacement(cluster, placementRequest, new AttributeFetcherImpl(solrCloudManager), PLACEMENT_PLAN_FACTORY);
+      placementPlan = plugin.computePlacement(placementRequest, placementContext);
     } catch (PlacementException pe) {
       throw new Assign.AssignmentException(pe);
     }
 
     return ReplicaPlacementImpl.toReplicaPositions(placementPlan.getReplicaPlacements());
   }
+
+  @Override
+  public void verifyDeleteCollection(SolrCloudManager solrCloudManager, DocCollection collection) throws Assign.AssignmentException, IOException, InterruptedException {
+    PlacementContext placementContext = new SimplePlacementContextImpl(solrCloudManager);
+    DeleteCollectionRequest modificationRequest = ModificationRequestImpl.createDeleteCollectionRequest(collection);
+    try {
+      plugin.verifyAllowedModification(modificationRequest, placementContext);
+    } catch (PlacementException pe) {
+      throw new Assign.AssignmentException(pe);
+    }
+  }
+
+  @Override
+  public void verifyDeleteReplicas(SolrCloudManager solrCloudManager, DocCollection collection, String shardId, Set<Replica> replicas) throws Assign.AssignmentException, IOException, InterruptedException {
+    PlacementContext placementContext = new SimplePlacementContextImpl(solrCloudManager);
+    DeleteReplicasRequest modificationRequest = ModificationRequestImpl.createDeleteReplicasRequest(collection, shardId, replicas);
+    try {
+      plugin.verifyAllowedModification(modificationRequest, placementContext);
+    } catch (PlacementException pe) {
+      throw new Assign.AssignmentException(pe);
+    }
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaMetricImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaMetricImpl.java
index f793a64..1d822b2 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaMetricImpl.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaMetricImpl.java
@@ -26,9 +26,12 @@
  */
 public class ReplicaMetricImpl<T> extends MetricImpl<T> implements ReplicaMetric<T> {
 
+  /** Replica index size in GB. */
   public static final ReplicaMetricImpl<Double> INDEX_SIZE_GB = new ReplicaMetricImpl<>("sizeGB", "INDEX.sizeInBytes", BYTES_TO_GB_CONVERTER);
 
+  /** 1-min query rate of the /select handler. */
   public static final ReplicaMetricImpl<Double> QUERY_RATE_1MIN = new ReplicaMetricImpl<>("queryRate", "QUERY./select.requestTimes:1minRate");
+  /** 1-min update rate of the /update handler. */
   public static final ReplicaMetricImpl<Double> UPDATE_RATE_1MIN = new ReplicaMetricImpl<>("updateRate", "UPDATE./update.requestTimes:1minRate");
 
   public ReplicaMetricImpl(String name, String internalName) {
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimplePlacementContextImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimplePlacementContextImpl.java
new file mode 100644
index 0000000..f7cbb77
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimplePlacementContextImpl.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.cluster.placement.impl;
+
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.cluster.Cluster;
+import org.apache.solr.cluster.placement.AttributeFetcher;
+import org.apache.solr.cluster.placement.PlacementContext;
+import org.apache.solr.cluster.placement.PlacementPlanFactory;
+
+import java.io.IOException;
+
+/**
+ * Implementation of {@link PlacementContext} that uses {@link SimpleClusterAbstractionsImpl}
+ * to create components necessary for the placement plugins to use.
+ */
+public class SimplePlacementContextImpl implements PlacementContext {
+
+  private final Cluster cluster;
+  private final AttributeFetcher attributeFetcher;
+  private final PlacementPlanFactory placementPlanFactory = new PlacementPlanFactoryImpl();
+
+  public SimplePlacementContextImpl(SolrCloudManager solrCloudManager) throws IOException {
+    cluster = new SimpleClusterAbstractionsImpl.ClusterImpl(solrCloudManager);
+    attributeFetcher = new AttributeFetcherImpl(solrCloudManager);
+  }
+
+  @Override
+  public Cluster getCluster() {
+    return cluster;
+  }
+
+  @Override
+  public AttributeFetcher getAttributeFetcher() {
+    return attributeFetcher;
+  }
+
+  @Override
+  public PlacementPlanFactory getPlacementPlanFactory() {
+    return placementPlanFactory;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementConfig.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementConfig.java
index bbf8dc8..b45e6a9 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementConfig.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementConfig.java
@@ -20,12 +20,15 @@
 import org.apache.solr.cluster.placement.PlacementPluginConfig;
 import org.apache.solr.common.annotation.JsonProperty;
 
+import java.util.Map;
+import java.util.Objects;
+
 /**
  * Configuration bean for {@link AffinityPlacementFactory}.
  */
 public class AffinityPlacementConfig implements PlacementPluginConfig {
 
-  public static final AffinityPlacementConfig DEFAULT = new AffinityPlacementConfig();
+  public static final AffinityPlacementConfig DEFAULT = new AffinityPlacementConfig(20L, 100L);
 
   /**
    * If a node has strictly less GB of free disk than this value, the node is excluded from assignment decisions.
@@ -43,14 +46,43 @@
   @JsonProperty
   public long prioritizedFreeDiskGB;
 
-  // no-arg public constructor required for deserialization
+  /**
+   * This property defines an additional constraint that primary collections (keys) should be
+   * located on the same nodes as the secondary collections (values). The plugin will assume
+   * that the secondary collection replicas are already in place and ignore candidate nodes where
+   * they are not already present.
+   */
+  @JsonProperty
+  public Map<String, String> withCollections;
+
+  /**
+   * Zero-argument public constructor, required for deserialization - do not use directly.
+   */
   public AffinityPlacementConfig() {
-    minimalFreeDiskGB = 20L;
-    prioritizedFreeDiskGB = 100L;
+    this(0L, 0L);
   }
 
+  /**
+   * Configuration for the {@link AffinityPlacementFactory}.
+   * @param minimalFreeDiskGB minimal free disk GB.
+   * @param prioritizedFreeDiskGB prioritized free disk GB.
+   */
   public AffinityPlacementConfig(long minimalFreeDiskGB, long prioritizedFreeDiskGB) {
+    this(minimalFreeDiskGB, prioritizedFreeDiskGB, Map.of());
+  }
+
+  /**
+   * Configuration for the {@link AffinityPlacementFactory}.
+   * @param minimalFreeDiskGB minimal free disk GB.
+   * @param prioritizedFreeDiskGB prioritized free disk GB.
+   * @param withCollections configuration of co-located collections: keys are
+   *                        primary collection names and values are secondary
+   *                        collection names.
+   */
+  public AffinityPlacementConfig(long minimalFreeDiskGB, long prioritizedFreeDiskGB, Map<String, String> withCollections) {
     this.minimalFreeDiskGB = minimalFreeDiskGB;
     this.prioritizedFreeDiskGB = prioritizedFreeDiskGB;
+    Objects.requireNonNull(withCollections);
+    this.withCollections = withCollections;
   }
 }
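Note: with the new three-argument constructor, a co-location constraint can be configured
programmatically as in this sketch (the collection names are hypothetical):

    import java.util.Map;
    import org.apache.solr.cluster.placement.plugins.AffinityPlacementConfig;

    // Replicas of "products" may only be placed on nodes that already host
    // replicas of "enrichments".
    AffinityPlacementConfig config =
        new AffinityPlacementConfig(20L, 100L, Map.of("products", "enrichments"));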
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
index 9c50289..79d1f92 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactory.java
@@ -27,8 +27,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
 /**
@@ -147,7 +149,7 @@
 
   @Override
   public PlacementPlugin createPluginInstance() {
-    return new AffinityPlacementPlugin(config.minimalFreeDiskGB, config.prioritizedFreeDiskGB);
+    return new AffinityPlacementPlugin(config.minimalFreeDiskGB, config.prioritizedFreeDiskGB, config.withCollections);
   }
 
   @Override
@@ -171,14 +173,29 @@
 
     private final long prioritizedFreeDiskGB;
 
+    // primary to secondary (1:1)
+    private final Map<String, String> withCollections;
+    // secondary to primary (1:N)
+    private final Map<String, Set<String>> colocatedWith;
+
     private final Random replicaPlacementRandom = new Random(); // ok even if random sequence is predictable.
 
     /**
      * The factory has decoded the configuration for the plugin instance and passes it the parameters it needs.
      */
-    private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDiskGB) {
+    private AffinityPlacementPlugin(long minimalFreeDiskGB, long prioritizedFreeDiskGB, Map<String, String> withCollections) {
       this.minimalFreeDiskGB = minimalFreeDiskGB;
       this.prioritizedFreeDiskGB = prioritizedFreeDiskGB;
+      Objects.requireNonNull(withCollections, "withCollections must not be null");
+      this.withCollections = withCollections;
+      if (withCollections.isEmpty()) {
+        colocatedWith = Map.of();
+      } else {
+        colocatedWith = new HashMap<>();
+        withCollections.forEach((primary, secondary) ->
+            colocatedWith.computeIfAbsent(secondary, s -> new HashSet<>())
+                .add(primary));
+      }
 
       // We make things reproducible in tests by using test seed if any
       String seed = System.getProperty("tests.seed");
@@ -187,13 +204,16 @@
       }
     }
 
+    @Override
     @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
-                                          PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    public PlacementPlan computePlacement(PlacementRequest request, PlacementContext placementContext) throws PlacementException {
       Set<Node> nodes = request.getTargetNodes();
       SolrCollection solrCollection = request.getCollection();
 
+      nodes = filterNodesWithCollection(placementContext.getCluster(), request, nodes);
+
       // Request all needed attributes
+      AttributeFetcher attributeFetcher = placementContext.getAttributeFetcher();
       attributeFetcher.requestNodeSystemProperty(AVAILABILITY_ZONE_SYSPROP).requestNodeSystemProperty(REPLICA_TYPE_SYSPROP);
       attributeFetcher
           .requestNodeMetric(NodeMetricImpl.NUM_CORES)
@@ -238,11 +258,94 @@
         // failure. Current code does fail if placement is impossible (constraint is at most one replica of a shard on any node).
         for (Replica.ReplicaType replicaType : Replica.ReplicaType.values()) {
           makePlacementDecisions(solrCollection, shardName, availabilityZones, replicaType, request.getCountReplicasToCreate(replicaType),
-              attrValues, replicaTypeToNodes, nodesWithReplicas, coresOnNodes, placementPlanFactory, replicaPlacements);
+              attrValues, replicaTypeToNodes, nodesWithReplicas, coresOnNodes, placementContext.getPlacementPlanFactory(), replicaPlacements);
         }
       }
 
-      return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+      return placementContext.getPlacementPlanFactory().createPlacementPlan(request, replicaPlacements);
+    }
+
+    @Override
+    public void verifyAllowedModification(ModificationRequest modificationRequest, PlacementContext placementContext) throws PlacementModificationException, InterruptedException {
+      if (modificationRequest instanceof DeleteShardsRequest) {
+        log.warn("DeleteShardsRequest not implemented yet, skipping: {}", modificationRequest);
+      } else if (modificationRequest instanceof DeleteCollectionRequest) {
+        verifyDeleteCollection((DeleteCollectionRequest) modificationRequest, placementContext);
+      } else if (modificationRequest instanceof DeleteReplicasRequest) {
+        verifyDeleteReplicas((DeleteReplicasRequest) modificationRequest, placementContext);
+      } else {
+        log.warn("unsupported request type, skipping: {}", modificationRequest);
+      }
+    }
+
+    private void verifyDeleteCollection(DeleteCollectionRequest deleteCollectionRequest, PlacementContext placementContext) throws PlacementModificationException, InterruptedException {
+      Cluster cluster = placementContext.getCluster();
+      Set<String> colocatedCollections = colocatedWith.getOrDefault(deleteCollectionRequest.getCollection().getName(), Set.of());
+      for (String primaryName : colocatedCollections) {
+        try {
+          if (cluster.getCollection(primaryName) != null) {
+            // still exists
+            throw new PlacementModificationException("colocated collection " + primaryName +
+                " of " + deleteCollectionRequest.getCollection().getName() + " still present");
+          }
+        } catch (IOException e) {
+          throw new PlacementModificationException("failed to retrieve colocated collection information", e);
+        }
+      }
+    }
+
+    private void verifyDeleteReplicas(DeleteReplicasRequest deleteReplicasRequest, PlacementContext placementContext) throws PlacementModificationException, InterruptedException {
+      Cluster cluster = placementContext.getCluster();
+      SolrCollection secondaryCollection = deleteReplicasRequest.getCollection();
+      Set<String> colocatedCollections = colocatedWith.get(secondaryCollection.getName());
+      if (colocatedCollections == null) {
+        return;
+      }
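+      // Count secondary replicas per node and shard, so a delete can be rejected
+      // when it would remove the last co-located replica from a node.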
+      Map<Node, Map<String, AtomicInteger>> secondaryNodeShardReplicas = new HashMap<>();
+      secondaryCollection.shards().forEach(shard ->
+          shard.replicas().forEach(replica -> {
+            secondaryNodeShardReplicas.computeIfAbsent(replica.getNode(), n -> new HashMap<>())
+                .computeIfAbsent(replica.getShard().getShardName(), s -> new AtomicInteger())
+                .incrementAndGet();
+          }));
+
+      // find the nodes that host replicas of the co-located (primary) collections
+      Map<Node, Set<String>> colocatingNodes = new HashMap<>();
+      try {
+        for (String colocatedCollection : colocatedCollections) {
+          SolrCollection coll = cluster.getCollection(colocatedCollection);
+          coll.shards().forEach(shard ->
+              shard.replicas().forEach(replica -> {
+                colocatingNodes.computeIfAbsent(replica.getNode(), n -> new HashSet<>())
+                    .add(coll.getName());
+              }));
+        }
+      } catch (IOException ioe) {
+        throw new PlacementModificationException("failed to retrieve colocated collection information", ioe);
+      }
+      PlacementModificationException exception = null;
+      for (Replica replica : deleteReplicasRequest.getReplicas()) {
+        if (!colocatingNodes.containsKey(replica.getNode())) {
+          continue;
+        }
+        // check that there will be at least one replica remaining
+        AtomicInteger secondaryCount = secondaryNodeShardReplicas
+            .getOrDefault(replica.getNode(), Map.of())
+            .getOrDefault(replica.getShard().getShardName(), new AtomicInteger());
+        if (secondaryCount.get() > 1) {
+          // we can delete it - record the deletion
+          secondaryCount.decrementAndGet();
+          continue;
+        }
+        // fail - this replica cannot be removed
+        if (exception == null) {
+          exception = new PlacementModificationException("delete replica(s) rejected");
+        }
+        exception.addRejectedModification(replica.toString(), "co-located with replicas of " + colocatingNodes.get(replica.getNode()));
+      }
+      if (exception != null) {
+        throw exception;
+      }
     }
 
     private Set<String> getZonesFromNodes(Set<Node> nodes, final AttributeValues attrValues) {
@@ -467,7 +570,7 @@
         if (candidateAzEntries == null) {
           // This can happen because not enough nodes for the placement request or already too many nodes with replicas of
           // the shard that can't accept new replicas or not enough nodes with enough free disk space.
-          throw new PlacementException("Not enough nodes to place " + numReplicas + " replica(s) of type " + replicaType +
+          throw new PlacementException("Not enough eligible nodes to place " + numReplicas + " replica(s) of type " + replicaType +
               " for shard " + shardName + " of collection " + solrCollection.getName());
         }
 
@@ -529,6 +632,32 @@
       }
     }
 
+    private Set<Node> filterNodesWithCollection(Cluster cluster, PlacementRequest request, Set<Node> initialNodes) throws PlacementException {
+      // if there's a `withCollection` constraint for this collection then remove nodes
+      // that are not eligible
+      String withCollectionName = withCollections.get(request.getCollection().getName());
+      if (withCollectionName == null) {
+        return initialNodes;
+      }
+      SolrCollection withCollection;
+      try {
+        withCollection = cluster.getCollection(withCollectionName);
+      } catch (Exception e) {
+        throw new PlacementException("Error getting info of withCollection=" + withCollectionName, e);
+      }
+      Set<Node> withCollectionNodes = new HashSet<>();
+      withCollection.shards().forEach(s -> s.replicas().forEach(r -> withCollectionNodes.add(r.getNode())));
+      if (withCollectionNodes.isEmpty()) {
+        throw new PlacementException("Collection " + withCollection + " defined in `withCollection` has no replicas on eligible nodes.");
+      }
+      HashSet<Node> filteredNodes = new HashSet<>(initialNodes);
+      filteredNodes.retainAll(withCollectionNodes);
+      if (filteredNodes.isEmpty()) {
+        throw new PlacementException("Collection " + withCollection + " defined in `withCollection` has no replicas on eligible nodes.");
+      }
+      return filteredNodes;
+    }
+
     /**
      * Comparator implementing the placement strategy based on free space and number of cores: we want to place new replicas
      * on nodes with the less number of cores, but only if they do have enough disk space (expressed as a threshold value).
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
index bb1e762..df7735e 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactory.java
@@ -26,7 +26,6 @@
 
 import com.google.common.collect.Ordering;
 import com.google.common.collect.TreeMultimap;
-import org.apache.solr.cluster.Cluster;
 import org.apache.solr.cluster.Node;
 import org.apache.solr.cluster.Replica;
 import org.apache.solr.cluster.SolrCollection;
@@ -50,15 +49,15 @@
 
   static private class MinimizeCoresPlacementPlugin implements PlacementPlugin {
 
+    @Override
     @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
-    public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
-                                          PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    public PlacementPlan computePlacement(PlacementRequest request, PlacementContext placementContext) throws PlacementException {
       int totalReplicasPerShard = 0;
       for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
         totalReplicasPerShard += request.getCountReplicasToCreate(rt);
       }
 
-      if (cluster.getLiveNodes().size() < totalReplicasPerShard) {
+      if (placementContext.getCluster().getLiveNodes().size() < totalReplicasPerShard) {
         throw new PlacementException("Cluster size too small for number of replicas per shard");
       }
 
@@ -67,6 +66,7 @@
 
       Set<Node> nodes = request.getTargetNodes();
 
+      AttributeFetcher attributeFetcher = placementContext.getAttributeFetcher();
       attributeFetcher.requestNodeMetric(NodeMetricImpl.NUM_CORES);
       attributeFetcher.fetchFrom(nodes);
       AttributeValues attrValues = attributeFetcher.fetchAttributes();
@@ -106,11 +106,11 @@
         }
 
         for (Replica.ReplicaType replicaType : Replica.ReplicaType.values()) {
-          placeReplicas(request.getCollection(), nodeEntriesToAssign, placementPlanFactory, replicaPlacements, shardName, request, replicaType);
+          placeReplicas(request.getCollection(), nodeEntriesToAssign, placementContext.getPlacementPlanFactory(), replicaPlacements, shardName, request, replicaType);
         }
       }
 
-      return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+      return placementContext.getPlacementPlanFactory().createPlacementPlan(request, replicaPlacements);
     }
 
     private void placeReplicas(SolrCollection solrCollection, ArrayList<Map.Entry<Integer, Node>> nodeEntriesToAssign,
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/RandomPlacementFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/RandomPlacementFactory.java
index 0b27d21..e222e14 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/RandomPlacementFactory.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/RandomPlacementFactory.java
@@ -23,7 +23,6 @@
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.solr.cluster.Cluster;
 import org.apache.solr.cluster.Node;
 import org.apache.solr.cluster.Replica;
 import org.apache.solr.cluster.SolrCollection;
@@ -53,14 +52,14 @@
       }
     }
 
-    public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
-                                          PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    @Override
+    public PlacementPlan computePlacement(PlacementRequest request, PlacementContext placementContext) throws PlacementException {
       int totalReplicasPerShard = 0;
       for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
         totalReplicasPerShard += request.getCountReplicasToCreate(rt);
       }
 
-      if (cluster.getLiveNodes().size() < totalReplicasPerShard) {
+      if (placementContext.getCluster().getLiveNodes().size() < totalReplicasPerShard) {
         throw new PlacementException("Cluster size too small for number of replicas per shard");
       }
 
@@ -69,15 +68,15 @@
       // Now place randomly all replicas of all shards on available nodes
       for (String shardName : request.getShardNames()) {
         // Shuffle the nodes for each shard so that replicas for a shard are placed on distinct yet random nodes
-        ArrayList<Node> nodesToAssign = new ArrayList<>(cluster.getLiveNodes());
+        ArrayList<Node> nodesToAssign = new ArrayList<>(placementContext.getCluster().getLiveNodes());
         Collections.shuffle(nodesToAssign, replicaPlacementRandom);
 
         for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
-          placeForReplicaType(request.getCollection(), nodesToAssign, placementPlanFactory, replicaPlacements, shardName, request, rt);
+          placeForReplicaType(request.getCollection(), nodesToAssign, placementContext.getPlacementPlanFactory(), replicaPlacements, shardName, request, rt);
         }
       }
 
-      return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+      return placementContext.getPlacementPlanFactory().createPlacementPlan(request, replicaPlacements);
     }
 
     private void placeForReplicaType(SolrCollection solrCollection, ArrayList<Node> nodesToAssign, PlacementPlanFactory placementPlanFactory,
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 288f6c5..daf82f6 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -95,7 +95,7 @@
 import static org.apache.solr.client.solrj.response.RequestStatusState.RUNNING;
 import static org.apache.solr.client.solrj.response.RequestStatusState.SUBMITTED;
 import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
-import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.COLL_PROP_PREFIX;
+import static org.apache.solr.common.params.CollectionAdminParams.PROPERTY_PREFIX;
 import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET;
 import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY;
 import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.CREATE_NODE_SET_SHUFFLE;
@@ -486,7 +486,7 @@
       if (shardsParam == null) h.copyFromClusterProp(props, NUM_SLICES);
       for (String prop : ImmutableSet.of(NRT_REPLICAS, PULL_REPLICAS, TLOG_REPLICAS))
         h.copyFromClusterProp(props, prop);
-      copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
+      copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
       return copyPropertiesWithPrefix(req.getParams(), props, "router.");
 
     }),
@@ -737,7 +737,7 @@
           SPLIT_FUZZ,
           SPLIT_BY_PREFIX,
           FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
+      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
     }),
     DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
       Map<String, Object> map = copy(req.getParams().required(), null,
@@ -775,7 +775,7 @@
           CREATE_NODE_SET,
           WAIT_FOR_FINAL_STATE,
           FOLLOW_ALIASES);
-      return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
+      return copyPropertiesWithPrefix(req.getParams(), map, PROPERTY_PREFIX);
     }),
     DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
       Map<String, Object> map = copy(req.getParams().required(), null,
@@ -917,7 +917,7 @@
           CREATE_NODE_SET,
           FOLLOW_ALIASES,
           SKIP_NODE_ASSIGNMENT);
-      return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
+      return copyPropertiesWithPrefix(req.getParams(), props, PROPERTY_PREFIX);
     }),
     OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> new LinkedHashMap<>()),
 
@@ -958,8 +958,8 @@
           PROPERTY_VALUE_PROP);
       copy(req.getParams(), map, SHARD_UNIQUE);
       String property = (String) map.get(PROPERTY_PROP);
-      if (!property.startsWith(COLL_PROP_PREFIX)) {
-        property = COLL_PROP_PREFIX + property;
+      if (!property.startsWith(PROPERTY_PREFIX)) {
+        property = PROPERTY_PREFIX + property;
       }
 
       boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
@@ -992,8 +992,8 @@
           PROPERTY_PROP);
       Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
       String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
-      if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
-        prop = COLL_PROP_PREFIX + prop;
+      if (!StringUtils.startsWith(prop, PROPERTY_PREFIX)) {
+        prop = PROPERTY_PREFIX + prop;
       }
 
       if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
@@ -1011,7 +1011,7 @@
     // XXX should this command support followAliases?
     MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
       Map<String, Object> m = copy(req.getParams(), null, CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES);
-      copyPropertiesWithPrefix(req.getParams(), m, COLL_PROP_PREFIX);
+      copyPropertiesWithPrefix(req.getParams(), m, PROPERTY_PREFIX);
       if (m.isEmpty()) {
         throw new SolrException(ErrorCode.BAD_REQUEST,
             formatString("no supported values provided {0}", CollectionAdminRequest.MODIFIABLE_COLLECTION_PROPERTIES.toString()));
@@ -1139,7 +1139,7 @@
       // from CREATE_OP:
       copy(req.getParams(), params, COLL_CONF, REPLICATION_FACTOR, NRT_REPLICAS, TLOG_REPLICAS,
           PULL_REPLICAS, CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE);
-      copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
+      copyPropertiesWithPrefix(req.getParams(), params, PROPERTY_PREFIX);
       return params;
     }),
     CREATESNAPSHOT_OP(CREATESNAPSHOT, (req, rsp, h) -> {
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
index eb11b66..3ecb4fd 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java
@@ -344,7 +344,7 @@
       // Mapped roles for this principal
       @SuppressWarnings("resource")
       AuthorizationPlugin auth = cc==null? null: cc.getAuthorizationPlugin();
-      if (auth != null) {
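+      // Only rule-based authorization plugins expose user roles; other implementations are skipped.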
+      if (auth instanceof RuleBasedAuthorizationPluginBase) {
         RuleBasedAuthorizationPluginBase rbap = (RuleBasedAuthorizationPluginBase) auth;
         Set<String> roles = rbap.getUserRoles(req.getUserPrincipal());
         info.add("roles", roles);
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 0f523bf..fb20011 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -71,7 +71,6 @@
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.FieldType;
-import org.apache.solr.schema.NumberType;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.StrField;
 import org.apache.solr.search.CollapsingQParserPlugin;
@@ -214,7 +213,6 @@
     FieldType fieldType = schemaField.getType();
 
     SortedDocValues values = null;
-    long nullValue = 0L;
 
     if(fieldType instanceof StrField) {
       //Get The Top Level SortedDocValues
@@ -225,28 +223,7 @@
       } else {
         values = DocValues.getSorted(reader, field);
       }
-    } else if (fieldType.getNumberType() != null) {
-      //Get the nullValue for the numeric collapse field
-      String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
-      
-      final NumberType numType = fieldType.getNumberType();
-
-      // Since the expand component depends on the operation of the collapse component, 
-      // which validates that numeric field types are 32-bit,
-      // we don't need to handle invalid 64-bit field types here.
-      // FIXME: what happens when expand.field specified?
-      //  how would this work for date field?
-      //  SOLR-10400: before this, long and double were explicitly handled
-      if (defaultValue != null) {
-        if (numType == NumberType.INTEGER) {
-          nullValue = Long.parseLong(defaultValue);
-        } else if (numType == NumberType.FLOAT) {
-          nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
-        }
-      } else if (NumberType.FLOAT.equals(numType)) { // Integer case already handled by nullValue defaulting to 0
-        nullValue = Float.floatToIntBits(0.0f);
-      }
-    } else {
+    } else if (fieldType.getNumberType() == null) {
       // possible if directly expand.field is specified
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
           "Expand not supported for fieldType:'" + fieldType.getTypeName() +"'");
@@ -358,13 +335,8 @@
         if (valueDocID < contextDoc) {
           valueDocID = collapseValues.advance(contextDoc);
         }
-        long value;
         if (valueDocID == contextDoc) {
-          value = collapseValues.longValue();
-        } else {
-          value = 0;
-        }
-        if(value != nullValue) {
+          final long value = collapseValues.longValue();
           groupSet.add(value);
           collapsedSet.add(globalDoc);
         }
@@ -399,7 +371,7 @@
       
       groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
     } else {
-      groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
+      groupExpandCollector = new NumericGroupExpandCollector(field, groupSet, collapsedSet, limit, sort);
     }
 
     if(groupQuery !=  null) {
@@ -628,11 +600,9 @@
     private LongObjectHashMap<Collector> groups;
 
     private IntHashSet collapsedSet;
-    private long nullValue;
 
-    public NumericGroupExpandCollector(String field, long nullValue, LongHashSet groupSet, IntHashSet collapsedSet, int limit, Sort sort) throws IOException {
+    public NumericGroupExpandCollector(String field, LongHashSet groupSet, IntHashSet collapsedSet, int limit, Sort sort) throws IOException {
       int numGroups = collapsedSet.size();
-      this.nullValue = nullValue;
       groups = new LongObjectHashMap<>(numGroups);
       for (LongCursor cursor : groupSet) {
         groups.put(cursor.value, getCollector(limit, sort));
@@ -663,17 +633,12 @@
 
         @Override
         public void collect(int docId) throws IOException {
-          long value;
           if (docValues.advanceExact(docId)) {
-            value = docValues.longValue();
-          } else {
-            value = 0;
-          }
-          final int index;
-          if (value != nullValue && 
-              (index = leafCollectors.indexOf(value)) >= 0 && 
-              !collapsedSet.contains(docId + docBase)) {
-            leafCollectors.indexGet(index).collect(docId);
+            final long value = docValues.longValue();
+            final int index = leafCollectors.indexOf(value);
+            if (index >= 0 && !collapsedSet.contains(docId + docBase)) {
+              leafCollectors.indexGet(index).collect(docId);
+            }
           }
         }
       };
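
Every deletion in this file follows from one observation: the iterator-style docValues API already distinguishes documents that have no value for the field, so the nullValue sentinel (and the FIXMEs around it) can be dropped. A sketch of the access pattern the new code relies on:

    // advanceExact(doc) returns false when the document has no value, so
    // missing values are skipped outright and a genuinely stored 0 can no
    // longer be mistaken for "no value".
    import java.io.IOException;
    import org.apache.lucene.index.NumericDocValues;

    final class DocValuesAccessSketch {
      static void collect(NumericDocValues docValues, int docId) throws IOException {
        if (docValues.advanceExact(docId)) {
          final long value = docValues.longValue(); // defined only after advanceExact == true
          // ... group/collect the document under value ...
        }
        // no else branch: documents without a value are simply not collected
      }
    }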
diff --git a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptEngineCustomizer.java b/solr/core/src/java/org/apache/solr/update/processor/ScriptEngineCustomizer.java
similarity index 95%
rename from solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptEngineCustomizer.java
rename to solr/core/src/java/org/apache/solr/update/processor/ScriptEngineCustomizer.java
index 05dd604..f230526 100644
--- a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptEngineCustomizer.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/ScriptEngineCustomizer.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.scripting.update;
+package org.apache.solr.update.processor;
 
 import javax.script.ScriptEngine;
 
diff --git a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
similarity index 89%
rename from solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptUpdateProcessorFactory.java
rename to solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
index e0de01b..d2f5a07 100644
--- a/solr/contrib/scripting/src/java/org/apache/solr/scripting/update/ScriptUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.scripting.update;
+package org.apache.solr.update.processor;
 
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -26,8 +26,6 @@
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.update.*;
-import org.apache.solr.update.processor.UpdateRequestProcessor;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
 import org.apache.solr.util.plugin.SolrCoreAware;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.FilenameUtils;
@@ -60,33 +58,34 @@
 
 /**
  * <p>
- * An update request processor factory that enables the use of update
- * processors implemented as scripts which can be loaded from the
- * configSet.  Previously known as the StatelessScriptUpdateProcessorFactory.
+ * An update request processor factory that enables the use of update 
+ * processors implemented as scripts which can be loaded by the 
+ * {@link SolrResourceLoader} (usually via the <code>conf</code> dir for 
+ * the SolrCore).
  * </p>
  * <p>
  * This factory requires at least one configuration parameter named
- * <code>script</code> which may be the name of a script file as a string,
- * or an array of multiple script files.  If multiple script files are
- * specified, they are executed sequentially in the order specified in the
+ * <code>script</code> which may be the name of a script file as a string, 
+ * or an array of multiple script files.  If multiple script files are 
+ * specified, they are executed sequentially in the order specified in the 
  * configuration -- as if multiple factories were configured sequentially
  * </p>
  * <p>
- * Each script file is expected to declare functions with the same name
- * as each method in {@link UpdateRequestProcessor}, using the same
- * arguments.  One slight deviation is in the optional return value from
- * these functions: If a script function has a <code>boolean</code> return
- * value, and that value is <code>false</code> then the processor will
- * cleanly terminate processing of the command and return, without forwarding
+ * Each script file is expected to declare functions with the same name 
+ * as each method in {@link UpdateRequestProcessor}, using the same 
+ * arguments.  One slight deviation is in the optional return value from 
+ * these functions: If a script function has a <code>boolean</code> return 
+ * value, and that value is <code>false</code> then the processor will 
+ * cleanly terminate processing of the command and return, without forwarding 
  * the command on to the next script or processor in the chain.
- * Due to limitations in the {@link ScriptEngine} API used by
+ * Due to limitations in the {@link ScriptEngine} API used by 
  * this factory, it can not enforce that all functions exist on initialization,
  * so errors from missing functions will only be generated at runtime when
  * the chain attempts to use them.
  * </p>
  * <p>
- * The factory may also be configured with an optional "params" argument,
- * which can be an {@link NamedList} (or array, or any other simple Java
+ * The factory may also be configured with an optional "params" argument, 
+ * which can be an {@link NamedList} (or array, or any other simple Java 
  * object) which will be put into the global scope for each script.
  * </p>
  * <p>
@@ -98,40 +97,40 @@
  *  <li>params - The "params" init argument in the factory configuration (if any)</li>
  * </ul>
  * <p>
- * Internally this update processor uses JDK 6 scripting engine support,
- * and any {@link Invocable} implementations of <code>ScriptEngine</code>
- * that can be loaded using the Solr Plugin ClassLoader may be used.
- * By default, the engine used for each script is determined by the file
- * extension (ie: a *.js file will be treated as a JavaScript script) but
- * this can be overridden by specifying an explicit "engine" name init
- * param for the factory, which identifies a registered name of a
- * {@link ScriptEngineFactory}.
- * (This may be particularly useful if multiple engines are available for
- * the same scripting language, and you wish to force the usage of a
+ * Internally this update processor uses JDK 6 scripting engine support, 
+ * and any {@link Invocable} implementations of <code>ScriptEngine</code> 
+ * that can be loaded using the Solr Plugin ClassLoader may be used.  
+ * By default, the engine used for each script is determined by the file 
+ * extension (ie: a *.js file will be treated as a JavaScript script) but 
+ * this can be overridden by specifying an explicit "engine" name init 
+ * param for the factory, which identifies a registered name of a 
+ * {@link ScriptEngineFactory}. 
+ * (This may be particularly useful if multiple engines are available for 
+ * the same scripting language, and you wish to force the usage of a 
  * particular engine because of known quirks)
  * </p>
  * <p>
- * A new {@link ScriptEngineManager} is created for each
- * <code>SolrQueryRequest</code> defining a "global" scope for the script(s)
- * which is request specific.  Separate <code>ScriptEngine</code> instances
- * are then used to evaluate the script files, resulting in an "engine" scope
+ * A new {@link ScriptEngineManager} is created for each 
+ * <code>SolrQueryRequest</code> defining a "global" scope for the script(s) 
+ * which is request specific.  Separate <code>ScriptEngine</code> instances 
+ * are then used to evaluate the script files, resulting in an "engine" scope 
  * that is specific to each script.
  * </p>
  * <p>
  * A simple example...
  * </p>
  * <pre class="prettyprint">
- * &lt;processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory"&gt;
+ * &lt;processor class="solr.StatelessScriptUpdateProcessorFactory"&gt;
  *   &lt;str name="script"&gt;updateProcessor.js&lt;/str&gt;
  * &lt;/processor&gt;
  * </pre>
  * <p>
- * A more complex example involving multiple scripts in different languages,
- * and a "params" <code>NamedList</code> that will be put into the global
+ * A more complex example involving multiple scripts in different languages, 
+ * and a "params" <code>NamedList</code> that will be put into the global 
  * scope of each script...
  * </p>
  * <pre class="prettyprint">
- * &lt;processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory"&gt;
+ * &lt;processor class="solr.StatelessScriptUpdateProcessorFactory"&gt;
  *   &lt;arr name="script"&gt;
  *     &lt;str name="script"&gt;first-processor.js&lt;/str&gt;
  *     &lt;str name="script"&gt;second-processor.py&lt;/str&gt;
@@ -143,11 +142,11 @@
  * &lt;/processor&gt;
  * </pre>
  * <p>
- * An example where the script file extensions are ignored, and an
+ * An example where the script file extensions are ignored, and an 
  * explicit script engine is used....
  * </p>
  * <pre class="prettyprint">
- * &lt;processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory"&gt;
+ * &lt;processor class="solr.StatelessScriptUpdateProcessorFactory"&gt;
  *   &lt;arr name="script"&gt;
  *     &lt;str name="script"&gt;first-processor.txt&lt;/str&gt;
  *     &lt;str name="script"&gt;second-processor.txt&lt;/str&gt;
@@ -155,10 +154,10 @@
  *   &lt;str name="engine"&gt;rhino&lt;/str&gt;
  * &lt;/processor&gt;
  * </pre>
- *
+ * 
  * @since 4.0.0
  */
-public class ScriptUpdateProcessorFactory extends UpdateRequestProcessorFactory implements SolrCoreAware {
+public class StatelessScriptUpdateProcessorFactory extends UpdateRequestProcessorFactory implements SolrCoreAware {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
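
The restored javadoc above describes two engine lookup paths: by file extension by default, or by a registered engine name when the "engine" init param is set. A minimal javax.script sketch of both; the "nashorn"/"js" names assume a JavaScript engine is actually available at runtime:

    import javax.script.Invocable;
    import javax.script.ScriptEngine;
    import javax.script.ScriptEngineManager;

    public class EngineLookupSketch {
      public static void main(String[] args) throws Exception {
        ScriptEngineManager mgr = new ScriptEngineManager();

        ScriptEngine byName = mgr.getEngineByName("nashorn");  // explicit "engine" param
        ScriptEngine byExt  = mgr.getEngineByExtension("js");  // derived from a *.js file name

        ScriptEngine engine = (byName != null) ? byName : byExt;
        if (engine instanceof Invocable) { // the factory requires Invocable support
          engine.eval("function processAdd(cmd) { /* no-op */ }");
          ((Invocable) engine).invokeFunction("processAdd", new Object());
        }
      }
    }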
 
@@ -183,8 +182,8 @@
     Collection<String> scripts =
       args.removeConfigArgs(SCRIPT_ARG);
     if (scripts.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                              "ScriptUpdateProcessorFactory must be " +
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
+                              "StatelessScriptUpdateProcessorFactory must be " +
                               "initialized with at least one " + SCRIPT_ARG);
     }
     scriptFiles = new ArrayList<>();
@@ -200,8 +199,8 @@
         engineName = (String)engine;
       } else {
         throw new SolrException
-          (SolrException.ErrorCode.SERVER_ERROR,
-           "'" + ENGINE_NAME_ARG + "' init param must be a String (found: " +
+          (SolrException.ErrorCode.SERVER_ERROR, 
+           "'" + ENGINE_NAME_ARG + "' init param must be a String (found: " + 
            engine.getClass() + ")");
       }
     }
@@ -247,7 +246,7 @@
       req.close();
     }
 
-
+    
   }
 
 
@@ -260,13 +259,13 @@
    * @param rsp The solr response
    * @return The list of initialized script engines.
    */
-  private List<EngineInfo> initEngines(SolrQueryRequest req,
-                                       SolrQueryResponse rsp)
+  private List<EngineInfo> initEngines(SolrQueryRequest req, 
+                                       SolrQueryResponse rsp) 
     throws SolrException {
-
+    
     List<EngineInfo> scriptEngines = new ArrayList<>();
 
-    ScriptEngineManager scriptEngineManager
+    ScriptEngineManager scriptEngineManager 
       = new ScriptEngineManager(resourceLoader.getClassLoader());
 
     scriptEngineManager.put("logger", log);
@@ -282,10 +281,10 @@
         engine = scriptEngineManager.getEngineByName(engineName);
         if (engine == null) {
           String details = getSupportedEngines(scriptEngineManager, false);
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
                                   "No ScriptEngine found by name: "
-                                  + engineName +
-                                  (null != details ?
+                                  + engineName + 
+                                  (null != details ? 
                                    " -- supported names: " + details : ""));
         }
       } else {
@@ -293,18 +292,18 @@
           (scriptFile.getExtension());
         if (engine == null) {
           String details = getSupportedEngines(scriptEngineManager, true);
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
                                   "No ScriptEngine found by file extension: "
-                                  + scriptFile.getFileName() +
-                                  (null != details ?
+                                  + scriptFile.getFileName() + 
+                                  (null != details ? 
                                    " -- supported extensions: " + details : ""));
-
+                                  
         }
       }
 
       if (! (engine instanceof Invocable)) {
-        String msg =
-          "Engine " + ((null != engineName) ? engineName :
+        String msg = 
+          "Engine " + ((null != engineName) ? engineName : 
                        ("for script " + scriptFile.getFileName())) +
           " does not support function invocation (via Invocable): " +
           engine.getClass().toString() + " (" +
@@ -320,7 +319,7 @@
       scriptEngines.add(new EngineInfo((Invocable)engine, scriptFile));
       try {
         Reader scriptSrc = scriptFile.openReader(resourceLoader);
-
+  
         try {
           try {
             AccessController.doPrivileged(new PrivilegedExceptionAction<Void>() {
@@ -334,23 +333,23 @@
             throw (ScriptException) e.getException();
           }
         } catch (ScriptException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                  "Unable to evaluate script: " +
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
+                                  "Unable to evaluate script: " + 
                                   scriptFile.getFileName(), e);
         } finally {
           IOUtils.closeQuietly(scriptSrc);
         }
       } catch (IOException ioe) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-            "Unable to evaluate script: " +
-            scriptFile.getFileName(), ioe);
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
+            "Unable to evaluate script: " + 
+            scriptFile.getFileName(), ioe);        
       }
     }
     return scriptEngines;
   }
 
   /**
-   * For error messages - returns null if there are any exceptions of any
+   * For error messages - returns null if there are any exceptions of any 
    * kind building the string (or if the list is empty for some unknown reason).
    * @param ext - if true, list of extensions, otherwise a list of engine names
    */
@@ -404,7 +403,7 @@
       if (invokeFunction("processDelete", cmd)) {
         super.processDelete(cmd);
       }
-
+        
     }
 
     @Override
@@ -436,9 +435,9 @@
     }
 
     /**
-     * returns true if processing should continue, or false if the
-     * request should be ended now.  Result value is computed from the return
-     * value of the script function if: it exists, is non-null, and can be
+     * returns true if processing should continue, or false if the 
+     * request should be ended now.  Result value is computed from the return 
+     * value of the script function if: it exists, is non-null, and can be 
      * cast to a java Boolean.
      */
     private boolean invokeFunction(String name, Object... cmd) {
@@ -462,10 +461,10 @@
           }
 
         } catch (ScriptException | NoSuchMethodException e) {
-          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-                                  "Unable to invoke function " + name +
-                                  " in script: " +
-                                  engine.getScriptFile().getFileName() +
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, 
+                                  "Unable to invoke function " + name + 
+                                  " in script: " + 
+                                  engine.getScriptFile().getFileName() + 
                                   ": " + e.getMessage(), e);
         }
       }
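
The invokeFunction javadoc above promises that only a genuine Boolean false stops the chain; a null or non-Boolean return lets processing continue. That contract reduces to a few lines:

    // Sketch of the documented return-value contract: only an actual
    // Boolean FALSE halts the chain; null or any non-Boolean result means
    // "keep going".
    static boolean shouldContinue(Object scriptResult) {
      if (scriptResult instanceof Boolean) {
        return (Boolean) scriptResult;
      }
      return true;
    }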
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/addfields.updateprocessor.js b/solr/core/src/test-files/solr/collection1/conf/addfields.updateprocessor.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/addfields.updateprocessor.js
rename to solr/core/src/test-files/solr/collection1/conf/addfields.updateprocessor.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml
similarity index 93%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml
rename to solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml
index 9bc5506..ded7416 100644
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-bogus-scriptengine-name.xml
@@ -22,7 +22,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
   <updateRequestProcessorChain name="force-script-engine" default="true">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="engine">giberish</str>
       <str name="script">missleading.extension.updateprocessor.js.txt</str>
     </processor>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml
similarity index 89%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml
rename to solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml
index 3542779..709774d 100644
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-invalid-scriptfile.xml
@@ -22,10 +22,10 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
   <updateRequestProcessorChain name="force-script-engine" default="true">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="engine">javascript</str>
       <!-- not parsable as javascript -->
-      <str name="script">invalid.script.xml</str>
+      <str name="script">currency.xml</str>
     </processor>
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml
similarity index 93%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml
rename to solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml
index ed43d9f..d13ba0b 100644
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/bad-solrconfig-missing-scriptfile.xml
@@ -22,7 +22,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
   <updateRequestProcessorChain name="force-script-engine" default="true">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">a-file-name-that-does-not-exist.js</str>
     </processor>
     <processor class="solr.RunUpdateProcessorFactory" />
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/conditional.updateprocessor.js b/solr/core/src/test-files/solr/collection1/conf/conditional.updateprocessor.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/conditional.updateprocessor.js
rename to solr/core/src/test-files/solr/collection1/conf/conditional.updateprocessor.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/cross-compatible.js b/solr/core/src/test-files/solr/collection1/conf/cross-compatible.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/cross-compatible.js
rename to solr/core/src/test-files/solr/collection1/conf/cross-compatible.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/evil.js b/solr/core/src/test-files/solr/collection1/conf/evil.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/evil.js
rename to solr/core/src/test-files/solr/collection1/conf/evil.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/missing.functions.updateprocessor.js b/solr/core/src/test-files/solr/collection1/conf/missing.functions.updateprocessor.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/missing.functions.updateprocessor.js
rename to solr/core/src/test-files/solr/collection1/conf/missing.functions.updateprocessor.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt b/solr/core/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt
rename to solr/core/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/stateless-solrconfig-script-updateprocessor.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
similarity index 79%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/stateless-solrconfig-script-updateprocessor.xml
rename to solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
index 58fbb86..74f00fd 100644
--- a/solr/contrib/scripting/src/test-files/solr/collection1/conf/stateless-solrconfig-script-updateprocessor.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
@@ -18,7 +18,7 @@
 -->
 
 <!--
-   Test Config for ScriptUpdateProcessorFactory
+   Test Config for ScriptUpdateProcessor
 
   -->
 <config>
@@ -29,7 +29,7 @@
   <schemaFactory class="ClassicIndexSchemaFactory"/>
 
   <updateRequestProcessorChain name="force-script-engine" default="true">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="engine">javascript</str>
       <str name="script">missleading.extension.updateprocessor.js.txt</str>
     </processor>
@@ -42,7 +42,7 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="single-script">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">trivial.updateprocessor0.js</str>
       <lst name="params">
         <bool name="boolValue">true</bool>
@@ -53,7 +53,7 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="dual-scripts-arr">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <arr name="script">
         <str>trivial.updateprocessor0.js</str>
         <str>trivial.updateprocessor1.js</str>
@@ -67,7 +67,7 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="dual-scripts-strs">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">trivial.updateprocessor0.js</str>
       <str name="script">trivial.updateprocessor1.js</str>
       <lst name="params">
@@ -79,22 +79,22 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="conditional-scripts">
-    <!-- multiple scripts,
+    <!-- multiple scripts, 
          test that the first one can conditionally stop execution -->
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">conditional.updateprocessor.js</str>
       <str name="script">addfields.updateprocessor.js</str>
     </processor>
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="conditional-script">
-    <!-- single script, followed by another processor
-         (that happens to be a script).
+    <!-- single script, followed by another processor 
+         (that happens to be a script). 
          test that the first one can conditionally stop execution -->
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">conditional.updateprocessor.js</str>
     </processor>
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">addfields.updateprocessor.js</str>
     </processor>
   </updateRequestProcessorChain>
@@ -112,13 +112,13 @@
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="javascript-compatibility">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">cross-compatible.js</str>
     </processor>
   </updateRequestProcessorChain>
 
   <updateRequestProcessorChain name="evil">
-    <processor class="org.apache.solr.update.processor.scripting.StatelessScriptUpdateProcessorFactory">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="script">evil.js</str>
     </processor>
   </updateRequestProcessorChain>
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/throw.error.on.add.updateprocessor.js b/solr/core/src/test-files/solr/collection1/conf/throw.error.on.add.updateprocessor.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/throw.error.on.add.updateprocessor.js
rename to solr/core/src/test-files/solr/collection1/conf/throw.error.on.add.updateprocessor.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/trivial.updateprocessor0.js b/solr/core/src/test-files/solr/collection1/conf/trivial.updateprocessor0.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/trivial.updateprocessor0.js
rename to solr/core/src/test-files/solr/collection1/conf/trivial.updateprocessor0.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/trivial.updateprocessor1.js b/solr/core/src/test-files/solr/collection1/conf/trivial.updateprocessor1.js
similarity index 100%
rename from solr/contrib/scripting/src/test-files/solr/collection1/conf/trivial.updateprocessor1.js
rename to solr/core/src/test-files/solr/collection1/conf/trivial.updateprocessor1.js
diff --git a/solr/contrib/scripting/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt
similarity index 100%
copy from solr/contrib/scripting/src/test-files/solr/collection1/conf/missleading.extension.updateprocessor.js.txt
copy to solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
index affccbf..1c62889 100644
--- a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
+++ b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
@@ -37,9 +37,12 @@
 
   <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
 
-  <lib dir="${solr.install.dir:../../../../../}/contrib/scripting/build/libs/" regex="solr-scripting-\d.*\.jar" />
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
 
-
+  </updateHandler>
   <requestHandler name="/select" class="solr.SearchHandler">
     <lst name="defaults">
       <str name="echoParams">explicit</str>
@@ -52,10 +55,11 @@
   <updateRequestProcessorChain name="force-script-engine" default="true">
     <processor class="solr.StatelessScriptUpdateProcessorFactory">
       <str name="engine">javascript</str>
-      <str name="script">trivial.updateprocessor.js</str>
+      <str name="script">missleading.extension.updateprocessor.js.txt</str>
     </processor>
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 
   <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
 </config>
+
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/trivial.updateprocessor.js b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/trivial.updateprocessor.js
deleted file mode 100644
index 698b78c..0000000
--- a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/trivial.updateprocessor.js
+++ /dev/null
@@ -1,22 +0,0 @@
-function processAdd(cmd) {
-  // Integer.valueOf is needed here to get a true java object, because 
-  // all javascript numbers are floating point (ie: java.lang.Double)
-  cmd.getSolrInputDocument().addField("script_added_i",
-                                      java.lang.Integer.valueOf(42));
-}
-
-function processDelete() {
-    // NOOP
-}
-function processCommit() {
-    // NOOP
-}
-function processRollback() {
-    // NOOP
-}
-function processMergeIndexes() {
-    // NOOP
-}
-function finish() {
-    // NOOP
-}
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java b/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
index 21b8369..43de56e 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/Builders.java
@@ -22,6 +22,7 @@
 import org.apache.solr.cluster.placement.impl.AttributeValuesImpl;
 import org.apache.solr.cluster.placement.impl.CollectionMetricsBuilder;
 import org.apache.solr.cluster.placement.impl.NodeMetricImpl;
+import org.apache.solr.cluster.placement.impl.PlacementPlanFactoryImpl;
 import org.apache.solr.cluster.placement.impl.ReplicaMetricImpl;
 import org.apache.solr.common.util.Pair;
 import org.junit.Assert;
@@ -92,6 +93,29 @@
       return clusterCollections;
     }
 
+    private static final PlacementPlanFactory PLACEMENT_PLAN_FACTORY = new PlacementPlanFactoryImpl();
+
+    public PlacementContext buildPlacementContext() {
+      Cluster cluster = build();
+      AttributeFetcher attributeFetcher = buildAttributeFetcher();
+      return new PlacementContext() {
+        @Override
+        public Cluster getCluster() {
+          return cluster;
+        }
+
+        @Override
+        public AttributeFetcher getAttributeFetcher() {
+          return attributeFetcher;
+        }
+
+        @Override
+        public PlacementPlanFactory getPlacementPlanFactory() {
+          return PLACEMENT_PLAN_FACTORY;
+        }
+      };
+    }
+
     public AttributeFetcher buildAttributeFetcher() {
       Map<String, Map<Node, String>> sysprops = new HashMap<>();
       Map<NodeMetric<?>, Map<Node, Object>> metrics = new HashMap<>();
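
buildPlacementContext() exists so the tests no longer assemble cluster, attribute fetcher and plan factory by hand. The resulting call shape, following the test changes later in this diff:

    // Sketch of the refactored call shape used throughout the tests below:
    // one PlacementContext replaces the former
    // (cluster, attributeFetcher, placementPlanFactory) argument triple.
    static PlacementPlan computeWith(PlacementPlugin plugin,
                                     PlacementRequestImpl request,
                                     Builders.ClusterBuilder clusterBuilder) throws Exception {
      PlacementContext ctx = clusterBuilder.buildPlacementContext();
      return plugin.computePlacement(request, ctx);
    }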
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
index 6967550..199d779 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/impl/PlacementPluginIntegrationTest.java
@@ -55,6 +55,7 @@
 import java.lang.invoke.MethodHandles;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -71,7 +72,7 @@
 public class PlacementPluginIntegrationTest extends SolrCloudTestCase {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  private static final String COLLECTION = PlacementPluginIntegrationTest.class.getName() + "_collection";
+  private static final String COLLECTION = PlacementPluginIntegrationTest.class.getSimpleName() + "_collection";
 
   private static SolrCloudManager cloudManager;
   private static CoreContainer cc;
@@ -232,6 +233,88 @@
   }
 
   @Test
+  public void testWithCollectionIntegration() throws Exception {
+    PlacementPluginFactory<? extends PlacementPluginConfig> pluginFactory = cc.getPlacementPluginFactory();
+    assertTrue("wrong type " + pluginFactory.getClass().getName(), pluginFactory instanceof DelegatingPlacementPluginFactory);
+    DelegatingPlacementPluginFactory wrapper = (DelegatingPlacementPluginFactory) pluginFactory;
+
+    int version = wrapper.getVersion();
+    log.debug("--initial version={}", version);
+
+    Set<String> nodeSet = new HashSet<>();
+    for (String node : cloudManager.getClusterStateProvider().getLiveNodes()) {
+      if (nodeSet.size() > 1) {
+        break;
+      }
+      nodeSet.add(node);
+    }
+
+    String SECONDARY_COLLECTION = COLLECTION + "_secondary";
+    PluginMeta plugin = new PluginMeta();
+    plugin.name = PlacementPluginFactory.PLUGIN_NAME;
+    plugin.klass = AffinityPlacementFactory.class.getName();
+    plugin.config = new AffinityPlacementConfig(1, 2, Map.of(COLLECTION, SECONDARY_COLLECTION));
+    V2Request req = new V2Request.Builder("/cluster/plugin")
+        .forceV2(true)
+        .POST()
+        .withPayload(singletonMap("add", plugin))
+        .build();
+    req.process(cluster.getSolrClient());
+
+    version = waitForVersionChange(version, wrapper, 10);
+
+    CollectionAdminResponse rsp = CollectionAdminRequest.createCollection(SECONDARY_COLLECTION, "conf", 1, 3)
+        .process(cluster.getSolrClient());
+    assertTrue(rsp.isSuccess());
+    cluster.waitForActiveCollection(SECONDARY_COLLECTION, 1, 3);
+    DocCollection secondary = cloudManager.getClusterStateProvider().getClusterState().getCollection(SECONDARY_COLLECTION);
+    Set<String> secondaryNodes = new HashSet<>();
+    secondary.forEachReplica((shard, replica) -> secondaryNodes.add(replica.getNodeName()));
+
+    rsp = CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
+        .setCreateNodeSet(String.join(",", nodeSet))
+        .process(cluster.getSolrClient());
+    assertTrue(rsp.isSuccess());
+    cluster.waitForActiveCollection(COLLECTION, 2, 4);
+    // make sure the primary replicas were placed on the nodeset
+    DocCollection primary = cloudManager.getClusterStateProvider().getClusterState().getCollection(COLLECTION);
+    primary.forEachReplica((shard, replica) ->
+        assertTrue("primary replica not on secondary node!", nodeSet.contains(replica.getNodeName())));
+
+    // try deleting secondary replica from node without the primary replica
+    Optional<String> onlySecondaryReplica = secondary.getReplicas().stream()
+        .filter(replica -> !nodeSet.contains(replica.getNodeName()))
+        .map(replica -> replica.getName()).findFirst();
+    assertTrue("no secondary node without primary replica", onlySecondaryReplica.isPresent());
+
+    rsp = CollectionAdminRequest.deleteReplica(SECONDARY_COLLECTION, "shard1", onlySecondaryReplica.get())
+        .process(cluster.getSolrClient());
+    assertTrue("delete of a lone secondary replica should succeed", rsp.isSuccess());
+
+    // try deleting secondary replica from node WITH the primary replica - should fail
+    Optional<String> secondaryWithPrimaryReplica = secondary.getReplicas().stream()
+        .filter(replica -> nodeSet.contains(replica.getNodeName()))
+        .map(replica -> replica.getName()).findFirst();
+    assertTrue("no secondary node with primary replica", secondaryWithPrimaryReplica.isPresent());
+    try {
+      rsp = CollectionAdminRequest.deleteReplica(SECONDARY_COLLECTION, "shard1", secondaryWithPrimaryReplica.get())
+          .process(cluster.getSolrClient());
+      fail("should have failed: " + rsp);
+    } catch (Exception e) {
+      assertTrue(e.toString(), e.toString().contains("co-located with replicas"));
+    }
+
+    // try deleting secondary collection
+    try {
+      rsp = CollectionAdminRequest.deleteCollection(SECONDARY_COLLECTION)
+          .process(cluster.getSolrClient());
+      fail("should have failed: " + rsp);
+    } catch (Exception e) {
+      assertTrue(e.toString(), e.toString().contains("colocated collection"));
+    }
+  }
+
+  @Test
   public void testAttributeFetcherImpl() throws Exception {
     CollectionAdminResponse rsp = CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 2)
         .process(cluster.getSolrClient());
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
index 81dda9d..2fd02a0 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/AffinityPlacementFactoryTest.java
@@ -25,7 +25,7 @@
 import org.apache.solr.cluster.SolrCollection;
 import org.apache.solr.cluster.placement.*;
 import org.apache.solr.cluster.placement.Builders;
-import org.apache.solr.cluster.placement.impl.PlacementPlanFactoryImpl;
+import org.apache.solr.cluster.placement.impl.ModificationRequestImpl;
 import org.apache.solr.cluster.placement.impl.PlacementRequestImpl;
 import org.apache.solr.common.util.Pair;
 import org.junit.BeforeClass;
@@ -50,10 +50,15 @@
 
   private final static long MINIMAL_FREE_DISK_GB = 10L;
   private final static long PRIORITIZED_FREE_DISK_GB = 50L;
+  private final static String secondaryCollectionName = "withCollection_secondary";
+  private final static String primaryCollectionName = "withCollection_primary";
 
   @BeforeClass
   public static void setupPlugin() {
-    AffinityPlacementConfig config = new AffinityPlacementConfig(MINIMAL_FREE_DISK_GB, PRIORITIZED_FREE_DISK_GB);
+    AffinityPlacementConfig config = new AffinityPlacementConfig(
+        MINIMAL_FREE_DISK_GB,
+        PRIORITIZED_FREE_DISK_GB,
+        Map.of(primaryCollectionName, secondaryCollectionName));
     AffinityPlacementFactory factory = new AffinityPlacementFactory();
     factory.configure(config);
     plugin = factory.createPluginInstance();
@@ -93,8 +98,7 @@
       collectionBuilder.initializeShardsReplicas(1, 0, 0, 0, List.of());
     }
 
-    Cluster cluster = clusterBuilder.build();
-    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
 
     SolrCollection solrCollection = collectionBuilder.build();
     List<Node> liveNodes = clusterBuilder.buildLiveNodes();
@@ -104,7 +108,7 @@
         Set.of(solrCollection.shards().iterator().next().getShardName()), new HashSet<>(liveNodes),
         1, 0, 0);
 
-    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
 
     assertEquals(1, pp.getReplicaPlacements().size());
     ReplicaPlacement rp = pp.getReplicaPlacements().iterator().next();
@@ -144,7 +148,7 @@
     PlacementRequestImpl placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
         2, 2, 2);
 
-    PlacementPlan pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
 
     assertEquals(18, pp.getReplicaPlacements().size()); // 3 shards, 6 replicas total each
     Set<Pair<String, Node>> placements = new HashSet<>();
@@ -157,7 +161,7 @@
     // Verify that if we ask for 7 replicas, the placement will use the low free space node
     placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
         7, 0, 0);
-    pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
     assertEquals(21, pp.getReplicaPlacements().size()); // 3 shards, 7 replicas each
     placements = new HashSet<>();
     for (ReplicaPlacement rp : pp.getReplicaPlacements()) {
@@ -170,7 +174,7 @@
     try {
       placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
           8, 0, 0);
-      plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+      plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
       fail("Placing 8 replicas should not be possible given only 7 nodes have enough space");
     } catch (PlacementException e) {
       // expected
@@ -214,7 +218,7 @@
     // The replicas must be placed on the most appropriate nodes, i.e. those that do not already have a replica for the
     // shard and then on the node with the lowest number of cores.
     // NRT are placed first and given the cluster state here the placement is deterministic (easier to test, only one good placement).
-    PlacementPlan pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
 
     // Each expected placement is represented as a string "shard replica-type node"
     Set<String> expectedPlacements = Set.of("1 NRT 1", "1 TLOG 2", "2 NRT 0", "2 TLOG 4");
@@ -312,7 +316,7 @@
     // Add 2 NRT and one TLOG to each shard.
     PlacementRequestImpl placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
         2, 1, 0);
-    PlacementPlan pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
     // Shard 1: The NRT's should go to the med cores node on AZ2 and low core on az3 (even though
     // a low core node can take the replica in az1, there's already an NRT replica there and we want spreading across AZ's),
     // the TLOG to the TLOG node on AZ2 (because the tlog node on AZ1 has low free disk)
@@ -326,7 +330,7 @@
     // If we add instead 2 PULL replicas to each shard
     placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
         0, 0, 2);
-    pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
     // Shard 1: Given node AZ3_TLOGPULL is taken by the TLOG replica, the PULL should go to AZ1_TLOGPULL_LOWFREEDISK and AZ2_TLOGPULL
     // Shard 2: Similarly AZ2_TLOGPULL is taken. Replicas should go to AZ1_TLOGPULL_LOWFREEDISK and AZ3_TLOGPULL
     expectedPlacements = Set.of("1 PULL " + AZ1_TLOGPULL_LOWFREEDISK, "1 PULL " + AZ2_TLOGPULL,
@@ -373,7 +377,7 @@
     for (int countNrtToPlace = 1; countNrtToPlace <= 9; countNrtToPlace++) {
       PlacementRequestImpl placementRequest = new PlacementRequestImpl(solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes),
           countNrtToPlace, 0, 0);
-      PlacementPlan pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+      PlacementPlan pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
       verifyPlacements(placements.get(countNrtToPlace - 1), pp, collectionBuilder.getShardBuilders(), liveNodes);
     }
   }
@@ -409,7 +413,7 @@
     PlacementRequestImpl placementRequest = new PlacementRequestImpl(solrCollection, Set.of(solrCollection.iterator().next().getShardName()), new HashSet<>(liveNodes),
         0, 0, 1);
 
-    PlacementPlan pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
 
     // Each expected placement is represented as a string "shard replica-type node"
     // Node 0 has less cores than node 1 (0 vs 1) so the placement should go there.
@@ -422,7 +426,7 @@
     it.next(); // skip first shard to do placement for the second one...
     placementRequest = new PlacementRequestImpl(solrCollection, Set.of(it.next().getShardName()), new HashSet<>(liveNodes),
         0, 0, 1);
-    pp = plugin.computePlacement(clusterBuilder.build(), placementRequest, clusterBuilder.buildAttributeFetcher(), new PlacementPlanFactoryImpl());
+    pp = plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
     expectedPlacements = Set.of("2 PULL 0");
     verifyPlacements(expectedPlacements, pp, collectionBuilder.getShardBuilders(), liveNodes);
   }
@@ -505,7 +509,8 @@
     collectionBuilder.initializeShardsReplicas(2, 0, 0, 0, clusterBuilder.getLiveNodeBuilders());
     clusterBuilder.addCollection(collectionBuilder);
 
-    Cluster cluster = clusterBuilder.build();
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
+    Cluster cluster = placementContext.getCluster();
 
     SolrCollection solrCollection = cluster.getCollection(collectionName);
 
@@ -514,14 +519,12 @@
             .map(Shard::getShardName).collect(Collectors.toSet()),
         cluster.getLiveNodes(), 2, 2, 2);
 
-    PlacementPlanFactory placementPlanFactory = new PlacementPlanFactoryImpl();
-    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
-    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, placementPlanFactory);
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
     // 2 shards, 6 replicas
     assertEquals(12, pp.getReplicaPlacements().size());
     // shard -> AZ -> replica count
     Map<Replica.ReplicaType, Map<String, Map<String, AtomicInteger>>> replicas = new HashMap<>();
-    AttributeValues attributeValues = attributeFetcher.fetchAttributes();
+    AttributeValues attributeValues = placementContext.getAttributeFetcher().fetchAttributes();
     for (ReplicaPlacement rp : pp.getReplicaPlacements()) {
       Optional<String> azOptional = attributeValues.getSystemProperty(rp.getNode(), AffinityPlacementFactory.AVAILABILITY_ZONE_SYSPROP);
       if (!azOptional.isPresent()) {
@@ -565,7 +568,8 @@
     collectionBuilder.initializeShardsReplicas(2, 0, 0, 0, clusterBuilder.getLiveNodeBuilders());
     clusterBuilder.addCollection(collectionBuilder);
 
-    Cluster cluster = clusterBuilder.build();
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
+    Cluster cluster = placementContext.getCluster();
 
     SolrCollection solrCollection = cluster.getCollection(collectionName);
 
@@ -574,14 +578,12 @@
             .map(Shard::getShardName).collect(Collectors.toSet()),
         cluster.getLiveNodes(), 2, 2, 2);
 
-    PlacementPlanFactory placementPlanFactory = new PlacementPlanFactoryImpl();
-    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
-    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, placementPlanFactory);
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
     // 2 shards, 6 replicas
     assertEquals(12, pp.getReplicaPlacements().size());
     // shard -> group -> replica count
     Map<Replica.ReplicaType, Map<String, Map<String, AtomicInteger>>> replicas = new HashMap<>();
-    AttributeValues attributeValues = attributeFetcher.fetchAttributes();
+    AttributeValues attributeValues = placementContext.getAttributeFetcher().fetchAttributes();
     for (ReplicaPlacement rp : pp.getReplicaPlacements()) {
       Optional<String> groupOptional = attributeValues.getSystemProperty(rp.getNode(), "group");
       if (!groupOptional.isPresent()) {
@@ -632,7 +634,8 @@
     collectionBuilder.initializeShardsReplicas(2, 0, 0, 0, clusterBuilder.getLiveNodeBuilders());
     clusterBuilder.addCollection(collectionBuilder);
 
-    Cluster cluster = clusterBuilder.build();
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
+    Cluster cluster = placementContext.getCluster();
 
     SolrCollection solrCollection = cluster.getCollection(collectionName);
 
@@ -641,15 +644,104 @@
             .map(Shard::getShardName).collect(Collectors.toSet()),
         cluster.getLiveNodes(), 1, 0, 1);
 
-    PlacementPlanFactory placementPlanFactory = new PlacementPlanFactoryImpl();
-    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
-    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, placementPlanFactory);
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
     assertEquals(4, pp.getReplicaPlacements().size());
     for (ReplicaPlacement rp : pp.getReplicaPlacements()) {
       assertFalse("should not put any replicas on " + smallNode, rp.getNode().equals(smallNode));
     }
   }
 
+  @Test
+  public void testWithCollectionPlacement() throws Exception {
+    int NUM_NODES = 3;
+    Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(NUM_NODES);
+    Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(secondaryCollectionName);
+    collectionBuilder.initializeShardsReplicas(1, 2, 0, 0, clusterBuilder.getLiveNodeBuilders());
+    clusterBuilder.addCollection(collectionBuilder);
+
+    collectionBuilder = Builders.newCollectionBuilder(primaryCollectionName);
+    collectionBuilder.initializeShardsReplicas(0, 0, 0, 0, clusterBuilder.getLiveNodeBuilders());
+    clusterBuilder.addCollection(collectionBuilder);
+
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
+    Cluster cluster = placementContext.getCluster();
+
+    SolrCollection secondaryCollection = cluster.getCollection(secondaryCollectionName);
+    SolrCollection primaryCollection = cluster.getCollection(primaryCollectionName);
+
+    Set<Node> secondaryNodes = new HashSet<>();
+    secondaryCollection.shards().forEach(s -> s.replicas().forEach(r -> secondaryNodes.add(r.getNode())));
+
+    PlacementRequestImpl placementRequest = new PlacementRequestImpl(primaryCollection,
+      Set.of("shard1", "shard2"), cluster.getLiveNodes(), 1, 0, 0);
+
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
+    assertEquals(2, pp.getReplicaPlacements().size());
+    // verify that all placements are on nodes with the secondary replica
+    pp.getReplicaPlacements().forEach(placement ->
+        assertTrue("placement node " + placement.getNode() + " not in secondary=" + secondaryNodes,
+            secondaryNodes.contains(placement.getNode())));
+
+    placementRequest = new PlacementRequestImpl(primaryCollection,
+        Set.of("shard1"), cluster.getLiveNodes(), 3, 0, 0);
+    try {
+      pp = plugin.computePlacement(placementRequest, placementContext);
+      fail("should generate 'Not enough eligible nodes' failure here");
+    } catch (PlacementException pe) {
+      assertTrue(pe.toString().contains("Not enough eligible nodes"));
+    }
+  }
+
+  @Test
+  public void testWithCollectionModificationRejected() throws Exception {
+    int NUM_NODES = 2;
+    Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(NUM_NODES);
+    Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(secondaryCollectionName);
+    collectionBuilder.initializeShardsReplicas(1, 4, 0, 0, clusterBuilder.getLiveNodeBuilders());
+    clusterBuilder.addCollection(collectionBuilder);
+
+    collectionBuilder = Builders.newCollectionBuilder(primaryCollectionName);
+    collectionBuilder.initializeShardsReplicas(2, 2, 0, 0, clusterBuilder.getLiveNodeBuilders());
+    clusterBuilder.addCollection(collectionBuilder);
+
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
+    Cluster cluster = placementContext.getCluster();
+
+    SolrCollection secondaryCollection = cluster.getCollection(secondaryCollectionName);
+    SolrCollection primaryCollection = cluster.getCollection(primaryCollectionName);
+
+    Node node = cluster.getLiveNodes().iterator().next();
+    Set<Replica> secondaryReplicas = new HashSet<>();
+    secondaryCollection.shards().forEach(shard ->
+        shard.replicas().forEach(replica -> {
+          if (secondaryReplicas.size() < 1 && replica.getNode().equals(node)) {
+            secondaryReplicas.add(replica);
+          }
+        }));
+
+    DeleteReplicasRequest deleteReplicasRequest = ModificationRequestImpl.createDeleteReplicasRequest(secondaryCollection, secondaryReplicas);
+    try {
+      plugin.verifyAllowedModification(deleteReplicasRequest, placementContext);
+    } catch (PlacementException pe) {
+      fail("should have succeeded: " + pe.toString());
+    }
+
+    secondaryCollection.shards().forEach(shard ->
+        shard.replicas().forEach(replica -> {
+          if (secondaryReplicas.size() < 2 && replica.getNode().equals(node)) {
+            secondaryReplicas.add(replica);
+          }
+        }));
+
+    deleteReplicasRequest = ModificationRequestImpl.createDeleteReplicasRequest(secondaryCollection, secondaryReplicas);
+    try {
+      plugin.verifyAllowedModification(deleteReplicasRequest, placementContext);
+      fail("should have failed: " + deleteReplicasRequest);
+    } catch (PlacementException pe) {
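+      // expected: deleting this many replicas must be rejected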
+    }
+  }
+
   @Test @Slow
   public void testScalability() throws Exception {
     log.info("==== numNodes ====");
@@ -684,9 +776,7 @@
     Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(collectionName);
     collectionBuilder.initializeShardsReplicas(numShards, 0, 0, 0, List.of());
 
-    Cluster cluster = clusterBuilder.build();
-    AttributeFetcher attributeFetcher = clusterBuilder.buildAttributeFetcher();
-
+    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
     SolrCollection solrCollection = collectionBuilder.build();
     List<Node> liveNodes = clusterBuilder.buildLiveNodes();
 
@@ -695,7 +785,7 @@
         new HashSet<>(liveNodes), nrtReplicas, tlogReplicas, pullReplicas);
 
     long start = System.nanoTime();
-    PlacementPlan pp = plugin.computePlacement(cluster, placementRequest, attributeFetcher, new PlacementPlanFactoryImpl());
+    PlacementPlan pp = plugin.computePlacement(placementRequest, placementContext);
     long end = System.nanoTime();
 
     final int REPLICAS_PER_SHARD = nrtReplicas + tlogReplicas + pullReplicas;
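
The hunks above all make the same API change: computePlacement() no longer takes the
Cluster, AttributeFetcher, and PlacementPlanFactory as separate arguments, because a
single PlacementContext now exposes all of them. A rough sketch of the new calling
convention, reusing only names visible in this diff (everything else is assumed):

    Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(3);
    // ... register collection builders, as the tests above do ...

    // one context object replaces the three separate computePlacement() arguments
    PlacementContext placementContext = clusterBuilder.buildPlacementContext();
    Cluster cluster = placementContext.getCluster();

    PlacementPlan plan = plugin.computePlacement(placementRequest, placementContext);
    for (ReplicaPlacement rp : plan.getReplicaPlacements()) {
      System.out.println(rp.getNode()); // the node chosen for one new replica
    }
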
diff --git a/solr/core/src/test/org/apache/solr/core/TestBadConfig.java b/solr/core/src/test/org/apache/solr/core/TestBadConfig.java
index 91fd9ae..1dfad85 100644
--- a/solr/core/src/test/org/apache/solr/core/TestBadConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestBadConfig.java
@@ -16,6 +16,10 @@
  */
 package org.apache.solr.core;
 
+import javax.script.ScriptEngineManager;
+
+import org.junit.Assume;
+
 public class TestBadConfig extends AbstractBadConfigTestBase {
 
   public void testUnsetSysProperty() throws Exception {
@@ -40,7 +44,7 @@
   }
 
   public void testUpdateLogButNoVersionField() throws Exception {
-
+    
     System.setProperty("enable.update.log", "true");
     try {
       assertConfigs("solrconfig.xml", "schema12.xml", "_version_");
@@ -49,6 +53,28 @@
     }
   }
 
+  public void testBogusScriptEngine() throws Exception {
+    // sanity check
+    Assume.assumeTrue(null == (new ScriptEngineManager()).getEngineByName("giberish"));
+                      
+    assertConfigs("bad-solrconfig-bogus-scriptengine-name.xml",
+                  "schema.xml","giberish");
+  }
+
+  public void testMissingScriptFile() throws Exception {
+    // sanity check
+    Assume.assumeNotNull((new ScriptEngineManager()).getEngineByExtension("js"));
+    assertConfigs("bad-solrconfig-missing-scriptfile.xml",
+                  "schema.xml","a-file-name-that-does-not-exist.js");
+  }
+
+  public void testInvalidScriptFile() throws Exception {
+    // sanity check
+    Assume.assumeNotNull((new ScriptEngineManager()).getEngineByName("javascript"));
+    assertConfigs("bad-solrconfig-invalid-scriptfile.xml",
+                  "schema.xml","currency.xml");
+  }
+
   public void testBogusMergePolicy() throws Exception {
     assertConfigs("bad-mpf-solrconfig.xml", "schema-minimal.xml",
                   "DummyMergePolicyFactory");
@@ -63,7 +89,7 @@
     assertConfigs("bad-solrconfig-managed-schema-named-schema.xml.xml",
                   "schema-minimal.xml", "managedSchemaResourceName can't be 'schema.xml'");
   }
-
+  
   public void testUnknownSchemaAttribute() throws Exception {
     assertConfigs("bad-solrconfig-unexpected-schema-attribute.xml", "schema-minimal.xml",
                   "Unexpected arg(s): {bogusParam=bogusValue}");
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index 3c640da..1a4da88 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -358,7 +358,7 @@
       Object timesFailed = ((NamedList)details.get("follower")).get(IndexFetcher.TIMES_FAILED);
       // SOLR-7134: we can have a fail because some mock index files have no checksum, will
       // always be downloaded, and may not be able to be moved into the existing index
-      assertTrue(i + ": " + "follower has fetch error count: " + (String)timesFailed, timesFailed == null || ((String) timesFailed).equals("1"));
+      assertTrue(i + ": " + "follower has fetch error count: " + timesFailed, timesFailed == null || ((Number) timesFailed).intValue() == 1);
 
       if (3 != i) {
         // index & fetch
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedExpandComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedExpandComponentTest.java
index a7b63b6..aafe388 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedExpandComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedExpandComponentTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr.handler.component;
 
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 
@@ -26,7 +27,6 @@
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.junit.BeforeClass;
-import org.junit.Test;
 
 /**
  * Test for distributed ExpandComponent
@@ -44,25 +44,36 @@
     initCore("solrconfig-collapseqparser.xml", "schema11.xml");
   }
 
-  @Test
   @ShardsFixed(num = 3)
   public void test() throws Exception {
-    final String group = (random().nextBoolean() ? "group_s" : "group_s_dv");
+    _test("group_s", "g1", "g2", "g3", "g4");
+    _test("group_s_dv", "g1", "g2", "g3", "g4");
+    _test("group_i", "1", "0", "3", "-1"); // NOTE: using 0 to explicitly confim we don't assume null
+    _test("group_ti_dv", "1", "-2", "0", "4"); // NOTE: using 0 to explicitly confim we don't assume null
+  }
+  
+  private void _test(final String group,
+                     final String aaa, final String bbb, final String ccc, final String ddd) throws Exception {
     
     del("*:*");
 
-    index_specific(0,"id","1", "term_s", "YYYY", group, "group1", "test_i", "5",  "test_l", "10", "test_f", "2000");
-    index_specific(0,"id","2", "term_s", "YYYY", group, "group1", "test_i", "50", "test_l", "100", "test_f", "200");
-    index_specific(1,"id","5", "term_s", "YYYY", group, "group2", "test_i", "4",  "test_l", "10", "test_f", "2000");
-    index_specific(1,"id","6", "term_s", "YYYY", group, "group2", "test_i", "10", "test_l", "100", "test_f", "200");
-    index_specific(0,"id","7", "term_s", "YYYY", group, "group1", "test_i", "1",  "test_l", "100000", "test_f", "2000");
-    index_specific(1,"id","8", "term_s", "YYYY", group, "group2", "test_i", "2",  "test_l", "100000", "test_f", "200");
-    index_specific(2,"id","9", "term_s", "YYYY", group, "group3", "test_i", "1000", "test_l", "1005", "test_f", "3000");
-    index_specific(2, "id", "10", "term_s", "YYYY", group, "group3", "test_i", "1500", "test_l", "1001", "test_f", "3200");
-    index_specific(2,"id", "11",  "term_s", "YYYY", group, "group3", "test_i", "1300", "test_l", "1002", "test_f", "3300");
-    index_specific(1,"id","12", "term_s", "YYYY", group, "group4", "test_i", "15",  "test_l", "10", "test_f", "2000");
-    index_specific(1,"id","13", "term_s", "YYYY", group, "group4", "test_i", "16",  "test_l", "9", "test_f", "2000");
-    index_specific(1,"id","14", "term_s", "YYYY", group, "group4", "test_i", "1",  "test_l", "20", "test_f", "2000");
+    index_specific(0,"id","1", "term_s", "YYYY", group, aaa, "test_i", "5",  "test_l", "10", "test_f", "2000");
+    index_specific(0,"id","2", "term_s", "YYYY", group, aaa, "test_i", "50", "test_l", "100", "test_f", "200");
+    index_specific(1,"id","5", "term_s", "YYYY", group, bbb, "test_i", "4",  "test_l", "10", "test_f", "2000");
+    index_specific(1,"id","6", "term_s", "YYYY", group, bbb, "test_i", "10", "test_l", "100", "test_f", "200");
+    index_specific(0,"id","7", "term_s", "YYYY", group, aaa, "test_i", "1",  "test_l", "100000", "test_f", "2000");
+    index_specific(1,"id","8", "term_s", "YYYY", group, bbb, "test_i", "2",  "test_l", "100000", "test_f", "200");
+    index_specific(2,"id","9", "term_s", "YYYY", group, ccc, "test_i", "1000", "test_l", "1005", "test_f", "3000");
+    index_specific(2,"id","10","term_s", "YYYY", group, ccc, "test_i", "1500", "test_l", "1001", "test_f", "3200");
+
+    // NOTE: nullPolicy=collapse will only be viable because all null docs are collocated in shard #2
+    index_specific(2,"id","88", "test_i", "1001", "test_l", "1001", "test_f", "3200");
+    index_specific(2,"id","99", "test_i", "11", "test_l", "100", "test_f", "200");
+
+    index_specific(2,"id","11","term_s", "YYYY", group, ccc, "test_i", "1300", "test_l", "1002", "test_f", "3300");
+    index_specific(1,"id","12","term_s", "YYYY", group, ddd, "test_i", "15",  "test_l", "10", "test_f", "2000");
+    index_specific(1,"id","13","term_s", "YYYY", group, ddd, "test_i", "16",  "test_l", "9", "test_f", "2000");
+    index_specific(1,"id","14","term_s", "YYYY", group, ddd, "test_i", "1",  "test_l", "20", "test_f", "2000");
 
 
     commit();
@@ -91,7 +102,7 @@
     query("q", "*:*", "start","1", "rows", "1", "fq", "{!collapse field="+group+"}", "defType", "edismax", "bf", "field(test_i)", "expand", "true", "fl","*,score");
 
     // multiple collapse and equal cost
-    ModifiableSolrParams baseParams = params("q", "*:*", "defType", "edismax", "expand", "true", "fl", "*,score",
+    ModifiableSolrParams baseParams = params("q", "term_s:YYYY", "defType", "edismax", "expand", "true", "fl", "*,score",
         "bf", "field(test_i)", "expand.sort", "id asc");
     baseParams.set("fq", "{!collapse field="+group+"}", "{!collapse field=test_i}");
     query(baseParams);
@@ -110,137 +121,185 @@
     assertTrue(e.getMessage().contains("missing expand field"));
     resetExceptionIgnores();
 
-    //First basic test case.
-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-
-    setDistributedParams(params);
-    QueryResponse rsp = queryServer(params);
-    Map<String, SolrDocumentList> results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 2, results, "1", "7");
-    assertExpandGroupCountAndOrder("group2", 2, results, "5", "8");
-    assertExpandGroupCountAndOrder("group3", 2, results, "11", "9");
-    assertExpandGroupCountAndOrder("group4", 2, results, "12", "14");
+    // Since none of these queries will match any doc w/null in the group field, it shouldn't matter what nullPolicy is used...
+    for (String np : Arrays.asList("", " nullPolicy=ignore", " nullPolicy=expand", " nullPolicy=collapse")) {
+      
+      //First basic test case.
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      
+      setDistributedParams(params);
+      QueryResponse rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      Map<String, SolrDocumentList> results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "1", "7");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "5", "8");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "11", "9");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "12", "14");
+      
+      
+      //Test expand.sort
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("expand.sort", "test_l desc");
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "7", "1");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "8", "5");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "9", "11");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "14", "12");
 
 
-    //Test expand.sort
+      //Test expand.rows
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("expand.sort", "test_l desc");
+      params.add("expand.rows", "1");
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 1, results, "7");
+      assertExpandGroupCountAndOrder(bbb, 1, results, "8");
+      assertExpandGroupCountAndOrder(ccc, 1, results, "9");
+      assertExpandGroupCountAndOrder(ddd, 1, results, "14");
+      
+      //Test expand.rows = 0 - no docs only expand count
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("expand.rows", "0");
+      params.add("fl", "id");
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 0, results);
+      assertExpandGroupCountAndOrder(bbb, 0, results);
+      assertExpandGroupCountAndOrder(ccc, 0, results);
+      assertExpandGroupCountAndOrder(ddd, 0, results);
+      
+      //Test expand.rows = 0 with expand.field
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "test_l:10");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("expand.fq", "test_f:2000");
+      params.add("expand.field", group);
+      params.add("expand.rows", "0");
+      params.add("fl", "id,score");
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(3, rsp.getResults(), "12" /* d */, "1" /* a */, "5" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, ddd);
+      assertExpandGroupCountAndOrder(aaa, 0, results);
+      assertExpandGroupCountAndOrder(ddd, 0, results);
+      
+      //Test key-only fl
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("fl", "id");
+      
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "1", "7");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "5", "8");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "11", "9");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "12", "14");
+      
+      //Test distrib.singlePass true
+      
+      params = new ModifiableSolrParams();
+      params.add("q", "term_s:YYYY");
+      params.add("fq", "{!collapse field="+group+np+"}");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      params.add("distrib.singlePass", "true");
+      
+      setDistributedParams(params);
+      rsp = queryServer(params);
+      assertCountAndOrder(4, rsp.getResults(), "10" /* c */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "1", "7");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "5", "8");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "11", "9");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "12", "14");
+    }
 
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-    params.add("expand.sort", "test_l desc");
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 2, results, "7", "1");
-    assertExpandGroupCountAndOrder("group2", 2, results, "8", "5");
-    assertExpandGroupCountAndOrder("group3", 2, results, "9", "11");
-    assertExpandGroupCountAndOrder("group4", 2, results, "14", "12");
+    { // queries matching all docs to test null groups from collapse and how it affects expand
+
+      ModifiableSolrParams params = new ModifiableSolrParams();
+      params.add("q", "*:*");
+      params.add("defType", "edismax");
+      params.add("bf", "field(test_i)");
+      params.add("expand", "true");
+      setDistributedParams(params);
+
+      // nullPolicy=expand
+      params.add("fq", "{!collapse field="+group+" nullPolicy=expand}");
+      
+      QueryResponse rsp = queryServer(params);
+      assertCountAndOrder(6, rsp.getResults(), "10" /* c */, "88" /* null */, "2" /* a */, "13" /* d */, "99" /* null */, "6" /* b */);
+      Map<String, SolrDocumentList> results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "1", "7");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "5", "8");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "11", "9");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "12", "14");
+
+      // nullPolicy=collapse
+      params.set("fq", "{!collapse field="+group+" nullPolicy=collapse}");
+      
+      rsp = queryServer(params);
+      assertCountAndOrder(5, rsp.getResults(), "10" /* c */, "88" /* null */, "2" /* a */, "13" /* d */, "6" /* b */);
+      results = rsp.getExpandedResults();
+      assertExpandGroups(results, aaa, bbb, ccc, ddd);
+      assertExpandGroupCountAndOrder(aaa, 2, results, "1", "7");
+      assertExpandGroupCountAndOrder(bbb, 2, results, "5", "8");
+      assertExpandGroupCountAndOrder(ccc, 2, results, "11", "9");
+      assertExpandGroupCountAndOrder(ddd, 2, results, "12", "14");
 
 
-    //Test expand.rows
-
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-    params.add("expand.sort", "test_l desc");
-    params.add("expand.rows", "1");
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 1, results, "7");
-    assertExpandGroupCountAndOrder("group2", 1, results, "8");
-    assertExpandGroupCountAndOrder("group3", 1, results, "9");
-    assertExpandGroupCountAndOrder("group4", 1, results, "14");
-
-    //Test expand.rows = 0 - no docs only expand count
-
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-    params.add("expand.rows", "0");
-    params.add("fl", "id");
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 0, results);
-    assertExpandGroupCountAndOrder("group2", 0, results);
-    assertExpandGroupCountAndOrder("group3", 0, results);
-    assertExpandGroupCountAndOrder("group4", 0, results);
-
-    //Test expand.rows = 0 with expand.field
-
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "test_l:10");
-    params.add("defType", "edismax");
-    params.add("expand", "true");
-    params.add("expand.fq", "test_f:2000");
-    params.add("expand.field", group);
-    params.add("expand.rows", "0");
-    params.add("fl", "id,score");
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1", "group4");
-    assertExpandGroupCountAndOrder("group1", 0, results);
-    assertExpandGroupCountAndOrder("group4", 0, results);
-
-    //Test key-only fl
-
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-    params.add("fl", "id");
-
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 2, results, "1", "7");
-    assertExpandGroupCountAndOrder("group2", 2, results, "5", "8");
-    assertExpandGroupCountAndOrder("group3", 2, results, "11", "9");
-    assertExpandGroupCountAndOrder("group4", 2, results, "12", "14");
-
-    //Test distrib.singlePass true
-
-    params = new ModifiableSolrParams();
-    params.add("q", "*:*");
-    params.add("fq", "{!collapse field="+group+"}");
-    params.add("defType", "edismax");
-    params.add("bf", "field(test_i)");
-    params.add("expand", "true");
-    params.add("distrib.singlePass", "true");
-
-    setDistributedParams(params);
-    rsp = queryServer(params);
-    results = rsp.getExpandedResults();
-    assertExpandGroups(results, "group1","group2", "group3", "group4");
-    assertExpandGroupCountAndOrder("group1", 2, results, "1", "7");
-    assertExpandGroupCountAndOrder("group2", 2, results, "5", "8");
-    assertExpandGroupCountAndOrder("group3", 2, results, "11", "9");
-    assertExpandGroupCountAndOrder("group4", 2, results, "12", "14");
-
+    }
+    
   }
 
   private void assertExpandGroups(Map<String, SolrDocumentList> expandedResults, String... groups) throws Exception {
@@ -264,12 +323,17 @@
     return buf.toString();
   }
 
-  private void assertExpandGroupCountAndOrder(String group, int count, Map<String, SolrDocumentList>expandedResults, String... docs) throws Exception {
+  private void assertExpandGroupCountAndOrder(final String group, final int count,
+                                              final Map<String, SolrDocumentList>expandedResults,
+                                              final String... docs) throws Exception {
     SolrDocumentList results = expandedResults.get(group);
     if(results == null) {
       throw new Exception("Group Not Found:"+group);
     }
-
+    assertCountAndOrder(count, results, docs);
+  }
+  private void assertCountAndOrder(final int count, final SolrDocumentList results,
+                                   final String... docs) throws Exception {
     if(results.size() != count) {
       throw new Exception("Expected Count "+results.size()+" Not Found:"+count);
     }
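
The rewritten test drives everything through assertCountAndOrder() and repeats the
whole sequence once per nullPolicy, the collapse qparser option that decides what
happens to documents with no value in the collapse field: ignore drops them, expand
keeps each one in the main result set, and collapse folds them into a single null
group. A minimal request built the same way the test does (the field name here is
just an example):

    import org.apache.solr.common.params.ModifiableSolrParams;

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "term_s:YYYY");
    // nullPolicy=collapse is only viable distributed when all null docs share one shard
    params.add("fq", "{!collapse field=group_s nullPolicy=expand}");
    params.add("expand", "true");
    params.add("expand.sort", "id asc");
    // submit via queryServer(params) in a distributed test, or req(params) single-node
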
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
index 7f23d68..e75c9f2 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
@@ -16,10 +16,8 @@
  */
 package org.apache.solr.handler.component;
 
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.List;
 
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.SolrException;
@@ -46,50 +44,50 @@
     assertU(commit());
   }
 
-  @Test
-  public void testExpand() throws Exception {
-    List<String> groups = new ArrayList<>();
-    groups.add("group_s");
-    groups.add("group_s_dv");
-
-    Collections.shuffle(groups, random());
-    String floatAppend = "";
-
-    String hint = (random().nextBoolean() ? " hint="+ CollapsingQParserPlugin.HINT_TOP_FC : "");
-
-    _testExpand(groups.get(0), floatAppend, hint);
+  private static String maybeTopFc() {
+    return (random().nextBoolean() ? " hint="+ CollapsingQParserPlugin.HINT_TOP_FC : "");
   }
-
-  @Test
-  public void testNumericExpand() throws Exception {
-    List<String> groups = new ArrayList<>();
-    groups.add("group_i");
-    groups.add("group_ti_dv");
-    groups.add("group_f");
-    groups.add("group_tf_dv");
-    Collections.shuffle(groups, random());
-    String floatAppend = "";
-    if(groups.get(0).indexOf("f") > -1) {
-      floatAppend = "."+random().nextInt(100);  //Append the float
-      floatAppend = Float.toString(Float.parseFloat(floatAppend)); //Create a proper float out of the string.
-      floatAppend = floatAppend.substring(1);  //Drop off the leading 0, leaving just the decimal
-    }
-
-    String hint = "";
-
-    _testExpand(groups.get(0), floatAppend, hint);
+  private static String floatAppend() {
+    String floatAppend = "."+random().nextInt(100);  //Append the float
+    floatAppend = Float.toString(Float.parseFloat(floatAppend)); //Create a proper float out of the string.
+    floatAppend = floatAppend.substring(1);  //Drop off the leading 0, leaving just the decimal
+    return floatAppend;
   }
-
+  
+  public void testString() throws Exception {
+    _testExpand("group_s", "", maybeTopFc());
+  }
+  public void testStringDv() throws Exception {
+    _testExpand("group_s_dv", "", maybeTopFc());
+  }
+ 
+  public void testInt() throws Exception {
+    _testExpand("group_i", "", "");
+  }
+  public void testIntDv() throws Exception {
+    _testExpand("group_ti_dv", "", "");
+  }
+  
+  public void testFloat() throws Exception {
+    _testExpand("group_f", floatAppend(), "");
+    _testExpand("group_f", ".0", ""); // explicit 0 check for 0 vs null group
+  }
+  public void testFloatDv() throws Exception {
+    _testExpand("group_tf_dv", floatAppend(), "");
+    _testExpand("group_tf_dv", ".0", ""); // explicit 0 check for 0 vs null group
+  }
+  
   private void _testExpand(String group, String floatAppend, String hint) throws Exception {
+    // NOTE: one of our groups uses '0' as the group value to explicitly check numeric expand for 0 vs null group behavior
     String[][] docs = {
         {"id","1", "term_s", "YYYY", group, "1"+floatAppend, "test_i", "5", "test_l", "10", "test_f", "2000", "type_s", "parent"},
         {"id","2", "term_s","YYYY", group, "1"+floatAppend, "test_i", "50", "test_l", "100", "test_f", "200", "type_s", "child"},
         {"id","3", "term_s", "YYYY", "test_i", "5000", "test_l", "100", "test_f", "200"},
         {"id","4", "term_s", "YYYY", "test_i", "500", "test_l", "1000", "test_f", "2000"},
-        {"id","5", "term_s", "YYYY", group, "2"+floatAppend, "test_i", "4", "test_l", "10", "test_f", "2000", "type_s", "parent"},
-        {"id","6", "term_s","YYYY", group, "2"+floatAppend, "test_i", "10", "test_l", "100", "test_f", "200", "type_s", "child"},
+        {"id","5", "term_s", "YYYY", group, "0"+floatAppend, "test_i", "4", "test_l", "10", "test_f", "2000", "type_s", "parent"},
+        {"id","6", "term_s","YYYY", group, "0"+floatAppend, "test_i", "10", "test_l", "100", "test_f", "200", "type_s", "child"},
         {"id","7", "term_s", "YYYY", group, "1"+floatAppend, "test_i", "1", "test_l", "100000", "test_f", "2000", "type_s", "child"},
-        {"id","8", "term_s","YYYY", group, "2"+floatAppend, "test_i", "2", "test_l",  "100000", "test_f", "200", "type_s", "child"}
+        {"id","8", "term_s","YYYY", group, "0"+floatAppend, "test_i", "2", "test_l",  "100000", "test_f", "200", "type_s", "child"}
     };
     createIndex(docs);
 
@@ -107,8 +105,8 @@
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='8']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='8']"
     );
 
     //Basic test case page 2
@@ -116,8 +114,8 @@
     assertQ(req(params, "rows", "1", "start", "1"), "*[count(/response/result/doc)=1]",
         "*[count(/response/lst[@name='expanded']/result)=1]",
         "/response/result/doc[1]/str[@name='id'][.='6']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='8']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='8']"
     );
 
     //Test expand.sort
@@ -129,12 +127,12 @@
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='1']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='5']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='5']"
     );
 
     //Test with nullPolicy, ExpandComponent should ignore docs with null values in the collapse fields.
-    //Main result set should include the doc with null value in the collapse field.
+    //Main result set should include the doc(s) with null value in the collapse field.
     params = new ModifiableSolrParams();
     params.add("q", "*:*");
     params.add("fq", "{!collapse field="+group+hint+" nullPolicy=collapse}");
@@ -149,8 +147,20 @@
         "/response/result/doc[3]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='1']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='5']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='5']"
+    );
+    params.set("fq", "{!collapse field="+group+hint+" nullPolicy=expand}");
+    assertQ(req(params), "*[count(/response/result/doc)=4]",
+        "*[count(/response/lst[@name='expanded']/result)=2]",
+        "/response/result/doc[1]/str[@name='id'][.='3']",
+        "/response/result/doc[2]/str[@name='id'][.='4']",
+        "/response/result/doc[3]/str[@name='id'][.='2']",
+        "/response/result/doc[4]/str[@name='id'][.='6']",
+        "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
+        "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='1']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='5']"
     );
 
 
@@ -169,8 +179,8 @@
         "/response/result/doc[2]/str[@name='id'][.='5']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='2']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
     );
 
 
@@ -190,8 +200,8 @@
         "/response/result/doc[2]/str[@name='id'][.='5']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='2']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
     );
 
     //Test override expand.fq and expand.q
@@ -211,8 +221,8 @@
         "/response/result/doc[2]/str[@name='id'][.='5']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='2']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='6']"
     );
 
     //Test expand.rows
@@ -227,11 +237,11 @@
     assertQ(req(params), "*[count(/response/result/doc)=2]",
         "*[count(/response/lst[@name='expanded']/result)=2]",
         "*[count(/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc)=1]",
-        "*[count(/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc)=1]",
+        "*[count(/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc)=1]",
         "/response/result/doc[1]/str[@name='id'][.='2']",
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']"
     );
 
     //Test expand.rows = 0 - no docs only expand count
@@ -245,7 +255,7 @@
     assertQ(req(params), "*[count(/response/result/doc)=2]",
             "*[count(/response/lst[@name='expanded']/result)=2]",
             "*[count(/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc)=0]",
-            "*[count(/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc)=0]",
+            "*[count(/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc)=0]",
             "/response/result/doc[1]/str[@name='id'][.='2']",
             "/response/result/doc[2]/str[@name='id'][.='6']"
     );
@@ -263,7 +273,7 @@
     assertQ(req(params, "fl", "id"), "*[count(/response/result/doc)=2]",
             "*[count(/response/lst[@name='expanded']/result)=2]",
             "*[count(/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc)=0]",
-            "*[count(/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc)=0]",
+            "*[count(/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc)=0]",
             "/response/result/doc[1]/str[@name='id'][.='1']",
             "/response/result/doc[2]/str[@name='id'][.='5']"
     );
@@ -281,7 +291,7 @@
     assertQ(req(params, "fl", "id,score"), "*[count(/response/result/doc)=2]",
             "*[count(/response/lst[@name='expanded']/result)=2]",
             "*[count(/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc)=0]",
-            "*[count(/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc)=0]",
+            "*[count(/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc)=0]",
             "*[count(/response/lst[@name='expanded']/result[@maxScore])=0]", //maxScore should not be available
             "/response/result/doc[1]/str[@name='id'][.='1']",
             "/response/result/doc[2]/str[@name='id'][.='5']",
@@ -330,8 +340,8 @@
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
         "count(//*[@name='score'])=0" // score shouldn't be returned when not requested
     );
 
@@ -342,8 +352,8 @@
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
         "count(//*[@name='score' and .='NaN'])=0"
     );
 
@@ -356,8 +366,8 @@
         "/response/result/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='8']",
         "count(//*[@name='score' and .='NaN'])=0"
     );
 
@@ -370,8 +380,8 @@
         // note that the expanded docs are in score-descending order (score is based on test_i)
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='1']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='5']",
         "count(//*[@name='score' and .='NaN'])=0",
         "count(/response/lst[@name='expanded']/result/doc[number(*/@name='score')!=number(*/@name='test_i')])=0"
     );
@@ -385,8 +395,8 @@
         // note that the expanded docs are in score-descending order (score is based on test_i)
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='7']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='1']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='8']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='5']",
         "count(//*[@name='score' and .='NaN'])=0",
         "count(/response/lst[@name='expanded']/result/doc[number(*/@name='score')!=number(*/@name='test_i')])=0"
     );
@@ -408,7 +418,7 @@
     );
 
     // Support expand enabled without previous collapse
-    assertQ(req("q", "type_s:child", "sort", group+" asc, test_l desc", "defType", "edismax",
+    assertQ(req("q", "type_s:child", "sort", group+" desc, test_l desc", "defType", "edismax",
         "expand", "true", "expand.q", "type_s:parent", "expand.field", group),
         "*[count(/response/result/doc)=4]",
         "*[count(/response/lst[@name='expanded']/result)=2]",
@@ -417,7 +427,7 @@
         "/response/result/doc[3]/str[@name='id'][.='8']",
         "/response/result/doc[4]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']"
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']"
     );
 
     // With multiple collapse
@@ -456,8 +466,8 @@
         "*[count(/response/result/doc)=2]",
         "/response/result/doc[1]/str[@name='id'][.='8']",
         "/response/result/doc[2]/str[@name='id'][.='7']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
-        "/response/lst[@name='expanded']/result[@name='2"+floatAppend+"']/doc[2]/str[@name='id'][.='6']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[1]/str[@name='id'][.='5']",
+        "/response/lst[@name='expanded']/result[@name='0"+floatAppend+"']/doc[2]/str[@name='id'][.='6']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[1]/str[@name='id'][.='1']",
         "/response/lst[@name='expanded']/result[@name='1"+floatAppend+"']/doc[2]/str[@name='id'][.='2']"
     );
diff --git a/solr/core/src/test/org/apache/solr/search/TestBlockCollapse.java b/solr/core/src/test/org/apache/solr/search/TestBlockCollapse.java
index 5eb8f37..a9de149 100644
--- a/solr/core/src/test/org/apache/solr/search/TestBlockCollapse.java
+++ b/solr/core/src/test/org/apache/solr/search/TestBlockCollapse.java
@@ -818,7 +818,6 @@
    * (and the other tests should adequately prove that the block heuristics for _root_ collapsing work)
    */
   public void testBlockCollapseWithExpandComponent() throws Exception {
-    // NOTE: due to SOLR-15078 we don't bother trying to collapse/expand on int in this test
 
     { // convert our docs + some docs w/o collapse fields, along with some commits, to update commands
       // in a shuffled order and process all of them...
@@ -839,10 +838,10 @@
     // we don't bother testing _root_ field collapsing, since it contains different field values than block_s1
     for (String opt : Arrays.asList(// no block collapse logic used (sanity checks)
                                     "field=block_s1",            
-                                    // "field=block_i", // TODO: SOLR-15078
+                                    "field=block_i",
 
                                     // block collapse used explicitly (int)
-                                    // "field=block_i  hint=block", // TODO: SOLR-15078
+                                    "field=block_i  hint=block",
                                     
                                     // block collapse used explicitly (ord)
                                     "field=block_s1 hint=block"
diff --git a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptEngineTest.java b/solr/core/src/test/org/apache/solr/update/processor/ScriptEngineTest.java
similarity index 98%
rename from solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptEngineTest.java
rename to solr/core/src/test/org/apache/solr/update/processor/ScriptEngineTest.java
index e0bd83b..8a913e1 100644
--- a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptEngineTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/ScriptEngineTest.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.scripting.update;
+package org.apache.solr.update.processor;
 
 import org.apache.lucene.util.Constants;
 
diff --git a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
similarity index 87%
rename from solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptUpdateProcessorFactoryTest.java
rename to solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
index 0a53db0..09dd783 100644
--- a/solr/contrib/scripting/src/test/org/apache/solr/scripting/update/ScriptUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.scripting.update;
+package org.apache.solr.update.processor;
 
 import javax.script.ScriptEngine;
 import javax.script.ScriptEngineManager;
@@ -25,26 +25,24 @@
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.core.SolrCore;
-import org.apache.solr.update.processor.UpdateProcessorTestBase;
-import org.apache.solr.update.processor.UpdateRequestProcessorChain;
 import org.junit.Assume;
 import org.junit.BeforeClass;
 
 /**
- * Tests {@link ScriptUpdateProcessorFactory}.
+ * Tests {@link StatelessScriptUpdateProcessorFactory}.
  *
- * TODO: This test, to run from an IDE, requires a working directory of &lt;path-to&gt;/solr/contrib/scripting/src/test-files.  Fix!
+ * TODO: This test, to run from an IDE, requires a working directory of &lt;path-to&gt;/solr/core/src/test-files.  Fix!
  */
-public class ScriptUpdateProcessorFactoryTest extends UpdateProcessorTestBase {
+public class StatelessScriptUpdateProcessorFactoryTest extends UpdateProcessorTestBase {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
     Assume.assumeNotNull((new ScriptEngineManager()).getEngineByExtension("js"));
-    initCore("solrconfig-script-updateprocessor.xml", "schema.xml");
+    initCore("solrconfig-script-updateprocessor.xml", "schema12.xml");
   }
 
   /**
-   * simple test of a basic script processor chain using the full
+   * simple test of a basic script processor chain using the full 
    * RequestHandler + UpdateProcessorChain flow
    */
   public void testFullRequestHandlerFlow() throws Exception {
@@ -64,13 +62,13 @@
     // clean up
     processDeleteById("run-no-scripts","4055");
     processCommit("run-no-scripts");
-
+    
   }
 
   public void testSingleScript() throws Exception {
     SolrCore core = h.getCore();
     UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("single-script");
-    final ScriptUpdateProcessorFactory factory = ((ScriptUpdateProcessorFactory) chained.getProcessors().get(0));
+    final StatelessScriptUpdateProcessorFactory factory = ((StatelessScriptUpdateProcessorFactory) chained.getProcessors().get(0));
     final List<String> functionMessages = new ArrayList<>();
     factory.setScriptEngineCustomizer(new ScriptEngineCustomizer() {
       @Override
@@ -93,7 +91,7 @@
 
     processDeleteById("single-script","1");
     processCommit("single-script");
-
+    
     assertQ("found deleted doc",
             req("q","id:1")
             , "//result[@numFound=0]");
@@ -110,12 +108,12 @@
   public void testMultipleScripts() throws Exception {
     SolrCore core = h.getCore();
 
-    for (final String chain : new String[] {"dual-scripts-arr",
+    for (final String chain : new String[] {"dual-scripts-arr", 
                                             "dual-scripts-strs"}) {
-
+    
       UpdateRequestProcessorChain chained = core.getUpdateProcessingChain(chain);
-      final ScriptUpdateProcessorFactory factory =
-        ((ScriptUpdateProcessorFactory) chained.getProcessors().get(0));
+      final StatelessScriptUpdateProcessorFactory factory = 
+        ((StatelessScriptUpdateProcessorFactory) chained.getProcessors().get(0));
       final List<String> functionMessages = new ArrayList<>();
       ScriptEngineCustomizer customizer = new ScriptEngineCustomizer() {
           @Override
@@ -130,12 +128,12 @@
                                        doc(f("id", "2"),
                                            f("name", " foo "),
                                            f("subject", "bar")));
-
-      assertEquals(chain + " didn't add Double field",
+      
+      assertEquals(chain + " didn't add Double field", 
                    42.3d, d.getFieldValue("script_added_d"));
       assertEquals(chain + " didn't add integer field",
           42, d.getFieldValue("script_added_i"));
-
+      
       processCommit("run-no-scripts");
 
       assertQ(chain + ": couldn't find doc by id",
@@ -144,72 +142,72 @@
 
       processDeleteById(chain, "2");
       processCommit(chain);
-
+      
       assertEquals(chain, 6, functionMessages.size());
       assertTrue(chain, functionMessages.contains("processAdd0"));
       assertTrue(chain, functionMessages.contains("processAdd1"));
       assertTrue(chain + ": script order doesn't match conf order",
-                 functionMessages.indexOf("processAdd0")
+                 functionMessages.indexOf("processAdd0") 
                  < functionMessages.indexOf("processAdd1"));
 
       assertTrue(chain, functionMessages.contains("processDelete0"));
       assertTrue(chain, functionMessages.contains("processDelete1"));
       assertTrue(chain + ": script order doesn't match conf order",
-                 functionMessages.indexOf("processDelete0")
+                 functionMessages.indexOf("processDelete0") 
                  < functionMessages.indexOf("processDelete1"));
 
       assertTrue(chain, functionMessages.contains("processCommit0"));
       assertTrue(chain, functionMessages.contains("processCommit1"));
       assertTrue(chain + ": script order doesn't match conf order",
-                 functionMessages.indexOf("processCommit0")
+                 functionMessages.indexOf("processCommit0") 
                  < functionMessages.indexOf("processCommit1"));
 
       finish(chain);
-
+    
       assertEquals(chain, 8, functionMessages.size());
 
       assertTrue(chain, functionMessages.contains("finish0"));
       assertTrue(chain, functionMessages.contains("finish1"));
       assertTrue(chain + ": script order doesn't match conf order",
-                 functionMessages.indexOf("finish0")
+                 functionMessages.indexOf("finish0") 
                  < functionMessages.indexOf("finish1"));
 
       assertQ(chain + ": found deleted doc",
               req("q","id:2")
               , "//result[@numFound=0]");
-
+      
     }
   }
 
 
   public void testConditionalExecution() throws Exception {
-    for (String chain : new String[] {"conditional-script",
+    for (String chain : new String[] {"conditional-script", 
                                       "conditional-scripts"}) {
 
       ModifiableSolrParams reqParams = new ModifiableSolrParams();
-
+      
       SolrInputDocument d = processAdd(chain,
                                        reqParams,
                                        doc(f("id", "3"),
                                            f("name", " foo "),
                                            f("subject", "bar")));
-
-      assertFalse(chain + " added String field despite condition",
+      
+      assertFalse(chain + " added String field despite condition", 
                   d.containsKey("script_added_s"));
-      assertFalse(chain + " added Double field despite condition",
+      assertFalse(chain + " added Double field despite condition", 
                   d.containsKey("script_added_d"));
-
+      
       reqParams.add("go-for-it", "true");
-
+      
       d = processAdd(chain,
                      reqParams,
                      doc(f("id", "4"),
                          f("name", " foo "),
                          f("subject", "bar")));
-
-      assertEquals(chain + " didn't add String field",
+      
+      assertEquals(chain + " didn't add String field", 
                    "i went for it", d.getFieldValue("script_added_s"));
-      assertEquals(chain +" didn't add Double field",
+      assertEquals(chain +" didn't add Double field", 
                    42.3d, d.getFieldValue("script_added_d"));
       assertEquals(chain + " didn't add integer field",
           42, d.getFieldValue("script_added_i"));
@@ -224,8 +222,8 @@
                                      doc(f("id", "5"),
                                          f("name", " foo "),
                                          f("subject", "bar")));
-
-    assertEquals(chain +" didn't add Double field",
+      
+    assertEquals(chain +" didn't add Double field", 
                  42.3d, d.getFieldValue("script_added_d"));
     assertEquals(chain + " didn't add integer field",
         42, d.getFieldValue("script_added_i"));
diff --git a/solr/docker/Dockerfile b/solr/docker/Dockerfile
index 9246521..d1a0db1 100644
--- a/solr/docker/Dockerfile
+++ b/solr/docker/Dockerfile
@@ -1,9 +1,21 @@
-ARG SOLR_PACKAGE_IMAGE
 ARG BASE_IMAGE=openjdk:11-jre-slim
 
-FROM $SOLR_PACKAGE_IMAGE as solr_package
+FROM $BASE_IMAGE as input
+ARG SOLR_VERSION
 
-FROM $BASE_IMAGE as runtime
+# ADD automatically extracts the tgz
+ADD /releases/solr-$SOLR_VERSION.tgz /opt/
+COPY /scripts /scripts
+
+# remove what we don't want; ensure permissions are right
+#  TODO: arguably these permissions should have been set correctly in the TAR beforehand
+RUN set -ex; \
+  rm -Rf /opt/solr-$SOLR_VERSION/docs /opt/solr-$SOLR_VERSION/dist/{solr-solrj-$SOLR_VERSION.jar,solrj-lib,solr-test-framework-$SOLR_VERSION.jar,test-framework}; \
+  find "/opt/solr-$SOLR_VERSION" -type d -print0 | xargs -0 chmod 0755; \
+  find "/opt/solr-$SOLR_VERSION" -type f -print0 | xargs -0 chmod 0644; \
+  chmod -R 0755 /scripts "/opt/solr-$SOLR_VERSION/bin" "/opt/solr-$SOLR_VERSION/contrib/prometheus-exporter/bin/solr-exporter" "/opt/solr-$SOLR_VERSION/server/scripts/cloud-scripts"
+
+FROM $BASE_IMAGE
 
 LABEL maintainer="The Apache Lucene/Solr Project"
 LABEL repository="https://github.com/apache/lucene-solr"
@@ -13,7 +25,7 @@
 
 RUN set -ex; \
     apt-get update; \
-    apt-get -y install acl dirmngr gpg lsof procps wget netcat gosu tini; \
+    apt-get -y install acl dirmngr lsof procps wget netcat gosu tini; \
     rm -rf /var/lib/apt/lists/*; \
     cd /usr/local/bin; wget -nv https://${GITHUB_URL}/apangin/jattach/releases/download/v1.5/jattach; chmod 755 jattach; \
     echo >jattach.sha512 "d8eedbb3e192a8596c08efedff99b9acf1075331e1747107c07cdb1718db2abe259ef168109e46bd4cf80d47d43028ff469f95e6ddcbdda4d7ffa73a20e852f9  jattach"; \
@@ -35,34 +47,25 @@
   groupadd -r --gid "$SOLR_GID" "$SOLR_GROUP"; \
   useradd -r --uid "$SOLR_UID" --gid "$SOLR_GID" "$SOLR_USER"
 
-COPY --chown=0:0 scripts /opt/docker-solr/scripts
+COPY --from=input scripts /opt/docker-solr/scripts
 
 ARG SOLR_VERSION
 
 # Used by solr-fg
 ENV SOLR_VERSION $SOLR_VERSION
 
-COPY --from=solr_package "/opt/solr-$SOLR_VERSION.tgz" "/opt/solr-$SOLR_VERSION.tgz"
+COPY --from=input /opt/solr-$SOLR_VERSION /opt/solr-$SOLR_VERSION
 
 RUN set -ex; \
-  tar -C /opt --extract --file "/opt/solr-$SOLR_VERSION.tgz" && \
-  rm "/opt/solr-$SOLR_VERSION.tgz"; \
   (cd /opt; ln -s "solr-$SOLR_VERSION" solr); \
-  rm -Rf /opt/solr/docs/ /opt/solr/dist/{solr-solrj-$SOLR_VERSION.jar,solrj-lib,solr-test-framework-$SOLR_VERSION.jar,test-framework}; \
-  mkdir -p /opt/solr/server/solr/lib /docker-entrypoint-initdb.d /opt/docker-solr; \
-  chown -R 0:0 "/opt/solr-$SOLR_VERSION"; \
-  find "/opt/solr-$SOLR_VERSION" -type d -print0 | xargs -0 chmod 0755; \
-  find "/opt/solr-$SOLR_VERSION" -type f -print0 | xargs -0 chmod 0644; \
-  chmod -R 0755 "/opt/solr-$SOLR_VERSION/bin" "/opt/solr-$SOLR_VERSION/contrib/prometheus-exporter/bin/solr-exporter" /opt/solr-$SOLR_VERSION/server/scripts/cloud-scripts; \
+  mkdir -p /opt/solr/server/solr/lib /docker-entrypoint-initdb.d; \
   cp /opt/solr/bin/solr.in.sh /etc/default/solr.in.sh; \
   mv /opt/solr/bin/solr.in.sh /opt/solr/bin/solr.in.sh.orig; \
   mv /opt/solr/bin/solr.in.cmd /opt/solr/bin/solr.in.cmd.orig; \
-  chown root:0 /etc/default/solr.in.sh; \
   chmod 0664 /etc/default/solr.in.sh; \
   mkdir -p -m0770 /var/solr; \
   sed -i -e "s/\"\$(whoami)\" == \"root\"/\$(id -u) == 0/" /opt/solr/bin/solr; \
   sed -i -e 's/lsof -PniTCP:/lsof -t -PniTCP:/' /opt/solr/bin/solr; \
-  chown -R "0:0" /opt/solr-$SOLR_VERSION /docker-entrypoint-initdb.d /opt/docker-solr; \
   chown -R "$SOLR_USER:0" /var/solr;
 
 VOLUME /var/solr
diff --git a/solr/docker/README.md b/solr/docker/README.md
index 583e8b0..d6d865d 100644
--- a/solr/docker/README.md
+++ b/solr/docker/README.md
@@ -76,7 +76,7 @@
 Solr expects some files and directories in `/var/solr`; if you use your own directory or volume you can either pre-populate them, or let Solr docker copy them for you. See [init-var-solr](scripts/init-var-solr).
 If you want to use custom configuration, mount it in the appropriate place. See below for examples.
 
-The Solr docker distribution adds [scripts](include/scripts) in `/opt/docker-solr/scripts` to make it easier to use under Docker, for example to create cores on container startup.
+The Solr docker distribution adds [scripts](scripts) in `/opt/docker-solr/scripts` to make it easier to use under Docker, for example to create cores on container startup.
 
 ## Creating cores
 
diff --git a/solr/docker/build.gradle b/solr/docker/build.gradle
index d6e80ac..6b7e214 100644
--- a/solr/docker/build.gradle
+++ b/solr/docker/build.gradle
@@ -18,106 +18,186 @@
 import com.google.common.base.Preconditions
 import com.google.common.base.Strings
 
-apply plugin: 'base'
-apply plugin: 'com.palantir.docker'
-
-subprojects {
-  apply plugin: 'base'
-  apply plugin: 'com.palantir.docker'
-}
-
 description = 'Solr Docker image'
 
-def dockerPackage = project(':solr:docker:package')
+apply plugin: 'base'
 
-dependencies {
-  docker dockerPackage
-}
-
+// Solr Docker inputs
 def dockerImageRepo = propertyOrEnvOrDefault("solr.docker.imageRepo", "SOLR_DOCKER_IMAGE_REPO", "apache/solr")
 def dockerImageTag = propertyOrEnvOrDefault("solr.docker.imageTag", "SOLR_DOCKER_IMAGE_TAG", "${version}")
 def dockerImageName = propertyOrEnvOrDefault("solr.docker.imageName", "SOLR_DOCKER_IMAGE_NAME", "${dockerImageRepo}:${dockerImageTag}")
 def baseDockerImage = propertyOrEnvOrDefault("solr.docker.baseImage", "SOLR_DOCKER_BASE_IMAGE", 'openjdk:11-jre-slim')
 def githubUrlOrMirror = propertyOrEnvOrDefault("solr.docker.githubUrl", "SOLR_DOCKER_GITHUB_URL", 'github.com')
 
-docker {
-  name = dockerImageName
-  files file('include')
-  buildArgs(['BASE_IMAGE' : baseDockerImage, 'SOLR_PACKAGE_IMAGE' : 'apache/solr-build:local-package', 'SOLR_VERSION': "${version}", 'GITHUB_URL': githubUrlOrMirror])
+// Build directory locations
+def dockerBuildDistribution = "$buildDir/distributions"
+def imageIdFile = "$buildDir/image-id"
+
+configurations {
+  packaging {
+    canBeResolved = true
+  }
+  dockerImage {
+    canBeResolved = true
+  }
 }
 
-tasks.docker {
-  // In order to create the solr docker image, the solr package image must be created first.
-  dependsOn(dockerPackage.tasks.docker)
+dependencies {
+  packaging project(path: ":solr:packaging", configuration: 'archives')
+
+  dockerImage files(imageIdFile) {
+    builtBy 'dockerBuild'
+  }
+}
+
+task dockerTar(type: Tar) {
+  group = 'Docker'
+  description = 'Package docker context to prepare for docker build'
+
+  dependsOn configurations.packaging
+  into('scripts') {
+    from file('scripts')
+    fileMode 755
+  }
+  into('releases') {
+    from configurations.packaging
+    include '*.tgz'
+  }
+  from file('Dockerfile')
+  destinationDirectory = file(dockerBuildDistribution)
+  extension 'tgz'
+  compression = Compression.GZIP
+}
+
+task dockerBuild(dependsOn: tasks.dockerTar) {
+  group = 'Docker'
+  description = 'Build Solr docker image'
+
+  // Ensure that the docker image is rebuilt on build-arg changes or changes in the docker context
+  inputs.properties([
+          baseDockerImage: baseDockerImage,
+          githubUrlOrMirror: githubUrlOrMirror,
+          version: version
+  ])
+  inputs.dir(dockerBuildDistribution)
+
+  doLast {
+    exec {
+      standardInput = tasks.dockerTar.outputs.files.singleFile.newDataInputStream()
+      commandLine "docker", "build",
+              "--iidfile", imageIdFile,
+              "--build-arg", "BASE_IMAGE=${inputs.properties.baseDockerImage}",
+              "--build-arg", "SOLR_VERSION=${version}",
+              "--build-arg", "GITHUB_URL=${inputs.properties.githubUrlOrMirror}",
+              "-"
+    }
+  }
 
   // Print information on the image after it has been created
   doLast {
+    def dockerImageId = file(imageIdFile).text
     project.logger.lifecycle("Solr Docker Image Created")
-    project.logger.lifecycle("\tName: $dockerImageName")
-    project.logger.lifecycle("\tBase Image: $baseDockerImage")
+    project.logger.lifecycle("\tID: \t$dockerImageId")
+    project.logger.lifecycle("\tBase Image: \t$baseDockerImage")
+    project.logger.lifecycle("\tSolr Version: \t$version")
+  }
+
+  outputs.files(imageIdFile)
+}
+
+task dockerTag(dependsOn: tasks.dockerBuild) {
+  group = 'Docker'
+  description = 'Tag Solr docker image'
+
+  def dockerImageIdFile = file(imageIdFile)
+  // Ensure that the docker image is re-tagged if the image ID or desired tag changes
+  inputs.properties([
+          dockerImageName: dockerImageName,
+  ])
+  inputs.file(dockerImageIdFile)
+
+  doLast {
+    def dockerImageId = dockerImageIdFile.text
+
+    exec {
+      commandLine "docker", "tag", dockerImageId, inputs.properties.dockerImageName
+    }
+
+    // Print information on the image after it has been tagged
+    project.logger.lifecycle("Solr Docker Image Tagged")
+    project.logger.lifecycle("\tID: \t$dockerImageId")
+    project.logger.lifecycle("\tTag: \t$dockerImageName")
   }
 }
 
-abstract class DockerTestSuite extends DefaultTask {
-  private String solrImageName = null;
-  private List<String> tests = new ArrayList<>();
-  private List<String> ignore = new ArrayList<>();
+task testDocker(dependsOn: tasks.dockerBuild) {
+  group = 'Docker'
+  description = 'Test Solr docker image'
 
-  @OutputDirectory
-  abstract DirectoryProperty getOutputDir()
+  def inputDir = "tests/cases"
+  def outputDir = "$buildDir/tmp/tests"
 
-  public void setSolrImageName(String solrImageName) {
-    this.solrImageName = solrImageName
-  }
+  // Ensure that the docker image is re-tested if the image ID changes or the test files change
+  inputs.properties([
+          includeTests: new HashSet(Arrays.asList(propertyOrEnvOrDefault("solr.docker.tests.include", "SOLR_DOCKER_TESTS_INCLUDE", ",").split(","))),
+          excludeTests: new HashSet(Arrays.asList(propertyOrEnvOrDefault("solr.docker.tests.exclude", "SOLR_DOCKER_TESTS_EXCLUDE", ",").split(",")))
+  ])
+  inputs.file(imageIdFile)
+  inputs.dir(inputDir)
 
-  public String getSolrImageName() {
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(solrImageName), "solrImageName is a required dockerTests configuration item.")
-    return solrImageName
-  }
+  doLast {
+    def solrImageId = tasks.dockerBuild.outputs.files.singleFile.text
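+    // Derive a short image reference: skip the "sha256:" prefix and keep the 7-character short ID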
+    def solrImageName = solrImageId.substring(7, 14)
 
-  @Option(option = "tests", description = "Only run these specified tests, comma separated.")
-  public void setTests(List<String> tests) {
-    this.tests = tests;
-  }
-
-  @Input
-  public List<String> getTests() {
-    return tests;
-  }
-
-  @Option(option = "ignore", description = "Ignore these tests, comma separated.")
-  public void setIgnore(List<String> ignore) {
-    this.ignore = ignore;
-  }
-
-  @Input
-  public List<String> getIgnore() {
-    return ignore;
-  }
-
-  @TaskAction
-  void execute() {
     // Print information on the image before it is tested
-    project.logger.lifecycle("Testing Solr Image: $solrImageName\n")
-    def sourceDir = project.file("tests/cases")
+    logger.lifecycle("Testing Solr Image:")
+    logger.lifecycle("\tID: $solrImageId\n")
+
+    // Run the tests
+    def sourceDir = file(inputDir)
     sourceDir.eachFile  { file ->
       def testName = file.getName()
-      def testCaseBuildDir = outputDir.dir(testName).get().toString()
+      def testCaseBuildDir = "${outputDir}/${testName}"
 
       // If specific tests are specified, only run those. Otherwise run all that are not ignored.
-      def runTest = !this.tests.isEmpty() ? tests.contains(testName) : !ignore.contains(testName)
+      def runTest = !inputs.properties.includeTests.isEmpty() ? inputs.properties.includeTests.contains(testName) : !inputs.properties.excludeTests.contains(testName)
       if (runTest) {
-        project.exec {
-          environment "TEST_DIR", "$file"
-          environment "BUILD_DIR", "$testCaseBuildDir"
+        exec {
+          environment "TEST_DIR", file
+          environment "BUILD_DIR", testCaseBuildDir
           commandLine "bash", "$file/test.sh", solrImageName
         }
       }
     }
   }
+
+  outputs.dir(outputDir)
 }
 
-task testDocker(type: DockerTestSuite) {
-  outputDir = project.file("$buildDir/tmp/tests")
-  solrImageName = dockerImageName
+task dockerPush(dependsOn: tasks.dockerTag) {
+  group = 'Docker'
+  description = 'Push Solr docker image'
+
+  // Ensure that the docker image is re-pushed if the image ID or tag changes
+  inputs.properties([
+          dockerImageName: dockerImageName,
+  ])
+  inputs.file(imageIdFile)
+
+  // We don't want to push a docker image unless the tests have passed
+  mustRunAfter tasks.testDocker
+
+  doLast {
+    exec {
+      commandLine "docker", "push", dockerImageName
+    }
+
+    // Print information on the image after it has been pushed
+    project.logger.lifecycle("Solr Docker Image Pushed: \t$dockerImageName")
+  }
+}
+
+// One task to build and tag a Solr docker image
+task docker {
+  dependsOn tasks.dockerBuild, tasks.dockerTag
 }
\ No newline at end of file
diff --git a/solr/docker/gradle-help.txt b/solr/docker/gradle-help.txt
new file mode 100644
index 0000000..63ad735
--- /dev/null
+++ b/solr/docker/gradle-help.txt
@@ -0,0 +1,80 @@
+Docker Images for Solr
+======================
+
+In order to build and tag a Solr docker image, simply run the following command:
+
+gradlew docker
+
+This calls the dockerBuild and dockerTag tasks, which have inputs that are described below.
+
+Building
+--------
+
+In order to build the Solr Docker image, run:
+
+gradlew dockerBuild
+
+The docker build task accepts the following inputs, all accepted via both Environment Variables and Gradle Properties.
+
+Base Docker Image: (The docker image used for the "FROM" in the Solr Dockerfile)
+   Default: "openjdk:11-jre-slim"
+   EnvVar: SOLR_DOCKER_BASE_IMAGE
+   Gradle Property: -Psolr.docker.baseImage
+
+GitHub URL or Mirror: (The URL of GitHub or a mirror of GitHub releases. This is useful when building the docker image behind a firewall that does not have access to github.com.)
+   Default: "github.com"
+   EnvVar: SOLR_DOCKER_GITHUB_URL
+   Gradle Property: -Psolr.docker.githubUrl
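+
+For example, a hypothetical invocation overriding both inputs (the equivalent environment variables work the same way):
+
+gradlew dockerBuild -Psolr.docker.baseImage=openjdk:11-jre -Psolr.docker.githubUrl=github.mycompany.com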
+
+Tagging and Pushing
+-------------------
+
+To tag the docker image, run the following command.
+This will also ensure that the docker image has been built as per the inputs detailed above.
+
+gradlew dockerTag
+
+And to push the image with the given tag, run the following command.
+Gradle will ensure that the docker image is built and tagged as the inputs describe before it is pushed.
+
+gradlew dockerPush
+
+The docker image name and tag can be customized via the following options, all accepted via both Environment Variables and Gradle Properties.
+
+Docker Image Repository:
+   Default: "apache/solr"
+   EnvVar: SOLR_DOCKER_IMAGE_REPO
+   Gradle Property: -Psolr.docker.imageRepo
+
+Docker Image Tag:
+   Default: the Solr version, e.g. "9.0.0-SNAPSHOT"
+   EnvVar: SOLR_DOCKER_IMAGE_TAG
+   Gradle Property: -Psolr.docker.imageTag
+
+Docker Image Name: (Use this to explicitly set the whole image name. If given, the image repo and image tag options above are ignored.)
+   Default: {image_repo}:{image_tag} (both options provided above, with defaults)
+   EnvVar: SOLR_DOCKER_IMAGE_NAME
+   Gradle Property: -Psolr.docker.imageName
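+
+For example, a hypothetical invocation tagging the image under a custom repository and tag:
+
+gradlew dockerTag -Psolr.docker.imageRepo=mycompany/solr -Psolr.docker.imageTag=9.0.0-custom
+
+This is equivalent to setting -Psolr.docker.imageName=mycompany/solr:9.0.0-custom.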
+
+Testing
+-------
+
+To test the docker image, run the following command.
+This will also ensure that the docker image has been built as per the inputs detailed above in the "Building" section.
+
+gradlew testDocker
+
+If docker image build parameters were used when building, the same inputs must be provided when testing.
+Otherwise a new docker image will be built for the tests to run against.
+
+You can also specify an explicit list of tests to run, or an explicit list of tests to ignore.
+Both inputs are optional, and by default all tests will be run.
+Each input takes a comma-separated list of test names; examples follow the options below.
+
+Run specific tests:
+   EnvVar: SOLR_DOCKER_TESTS_INCLUDE
+   Gradle Property: -Psolr.docker.tests.include
+
+Exclude specific tests:
+   EnvVar: SOLR_DOCKER_TESTS_EXCLUDE
+   Gradle Property: -Psolr.docker.tests.exclude
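+
+For example, to run only the "gosu" test, or to run every test except it (other test names follow the directories under tests/cases):
+
+gradlew testDocker -Psolr.docker.tests.include=gosu
+gradlew testDocker -Psolr.docker.tests.exclude=gosu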
diff --git a/solr/docker/package/Dockerfile.local-package b/solr/docker/package/Dockerfile.local-package
deleted file mode 100644
index e37d67f..0000000
--- a/solr/docker/package/Dockerfile.local-package
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM scratch
-
-COPY releases/ /opt/
\ No newline at end of file
diff --git a/solr/docker/package/Dockerfile.release-package b/solr/docker/package/Dockerfile.release-package
deleted file mode 100644
index 85947c9..0000000
--- a/solr/docker/package/Dockerfile.release-package
+++ /dev/null
@@ -1,74 +0,0 @@
-ARG BASE_IMAGE=openjdk:11-jre
-
-FROM $BASE_IMAGE as downloader
-
-ARG SOLR_VERSION
-ARG SOLR_SHA512
-ARG SOLR_KEYS
-# If specified, this will override SOLR_DOWNLOAD_SERVER and all ASF mirrors. Typically used downstream for custom builds
-ARG SOLR_DOWNLOAD_URL
-
-# Override the solr download location with e.g.:
-#   docker build -t mine --build-arg SOLR_DOWNLOAD_SERVER=http://www-eu.apache.org/dist/lucene/solr .
-ARG SOLR_DOWNLOAD_SERVER
-# This is only applicable when SOLR_DOWNLOAD_URL is not provided. Skips the GPG check for Solr downloads.
-ARG SKIP_GPG_CHECK="true"
-
-ENV SOLR_CLOSER_URL="http://www.apache.org/dyn/closer.lua?filename=lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz&action=download" \
-    SOLR_DIST_URL="https://www.apache.org/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz" \
-    SOLR_ARCHIVE_URL="https://archive.apache.org/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz"
-
-RUN set -ex; \
-    apt-get update; \
-    apt-get -y install dirmngr gpg wget; \
-    rm -rf /var/lib/apt/lists/*;
-
-RUN set -ex; \
-  export GNUPGHOME="/tmp/gnupg_home"; \
-  mkdir -p "$GNUPGHOME"; \
-  chmod 700 "$GNUPGHOME"; \
-  echo "disable-ipv6" >> "$GNUPGHOME/dirmngr.conf"; \
-  for key in $SOLR_KEYS; do \
-    found=''; \
-    for server in \
-      ha.pool.sks-keyservers.net \
-      hkp://keyserver.ubuntu.com:80 \
-      hkp://p80.pool.sks-keyservers.net:80 \
-      pgp.mit.edu \
-    ; do \
-      echo "  trying $server for $key"; \
-      gpg --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$key" && found=yes && break; \
-      gpg --batch --keyserver "$server" --keyserver-options timeout=10 --recv-keys "$key" && found=yes && break; \
-    done; \
-    test -z "$found" && echo >&2 "error: failed to fetch $key from several disparate servers -- network issues?" && exit 1; \
-  done; \
-  exit 0
-
-RUN set -ex; \
-  export GNUPGHOME="/tmp/gnupg_home"; \
-  MAX_REDIRECTS=1; \
-  if [ -n "$SOLR_DOWNLOAD_URL" ]; then \
-    # If a custom URL is defined, we download from non-ASF mirror URL and allow more redirects and skip GPG step
-    # This takes effect only if the SOLR_DOWNLOAD_URL build-arg is specified, typically in downstream Dockerfiles
-    MAX_REDIRECTS=4; \
-    SKIP_GPG_CHECK="true"; \
-  elif [ -n "$SOLR_DOWNLOAD_SERVER" ]; then \
-    SOLR_DOWNLOAD_URL="$SOLR_DOWNLOAD_SERVER/$SOLR_VERSION/solr-$SOLR_VERSION.tgz"; \
-  fi; \
-  for url in $SOLR_DOWNLOAD_URL $SOLR_CLOSER_URL $SOLR_DIST_URL $SOLR_ARCHIVE_URL; do \
-    if [ -f "/opt/solr-$SOLR_VERSION.tgz" ]; then break; fi; \
-    echo "downloading $url"; \
-    if wget -t 10 --max-redirect $MAX_REDIRECTS --retry-connrefused -nv "$url" -O "/opt/solr-$SOLR_VERSION.tgz"; then break; else rm -f "/opt/solr-$SOLR_VERSION.tgz"; fi; \
-  done; \
-  if [ ! -f "/opt/solr-$SOLR_VERSION.tgz" ]; then echo "failed all download attempts for solr-$SOLR_VERSION.tgz"; exit 1; fi; \
-  if [ "$SKIP_GPG_CHECK" != "true" ]; then \
-    echo "downloading $SOLR_ARCHIVE_URL.asc"; \
-    wget -nv "$SOLR_ARCHIVE_URL.asc" -O "/opt/solr-$SOLR_VERSION.tgz.asc"; \
-    echo "$SOLR_SHA512 */opt/solr-$SOLR_VERSION.tgz" | sha512sum -c -; \
-    (>&2 ls -l "/opt/solr-$SOLR_VERSION.tgz" "/opt/solr-$SOLR_VERSION.tgz.asc"); \
-    gpg --batch --verify "/opt/solr-$SOLR_VERSION.tgz.asc" "/opt/solr-$SOLR_VERSION.tgz"; \
-  else \
-    echo "Skipping GPG validation due to non-Apache build"; \
-  fi; \
-  { command -v gpgconf; gpgconf --kill all || :; }; \
-  rm -r "$GNUPGHOME";
diff --git a/solr/docker/package/build.gradle b/solr/docker/package/build.gradle
deleted file mode 100644
index 94cbfdb..0000000
--- a/solr/docker/package/build.gradle
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-description = 'Solr Docker Package image'
-
-// The solr package docker image relies on the output of the solr:packaging project.
-Project solrPackaging = project(':solr:packaging')
-
-dependencies {
-  docker solrPackaging
-}
-
-docker {
-  name = 'apache/solr-build:local-package'
-  dockerfile file('Dockerfile.local-package')
-  files(solrPackaging.tasks.distTar.outputs)
-  getCopySpec().into('releases')
-}
-
-// Only allow the following docker tasks
-def availableDockerTasks = ["docker", "dockerClean", "dockerPrepare", "dockerfileZip"]
-project.tasks.configureEach { t -> t.enabled = t.getGroup() != "Docker" || availableDockerTasks.contains(t.getName()) }
diff --git a/solr/docker/include/scripts/docker-entrypoint.sh b/solr/docker/scripts/docker-entrypoint.sh
similarity index 100%
rename from solr/docker/include/scripts/docker-entrypoint.sh
rename to solr/docker/scripts/docker-entrypoint.sh
diff --git a/solr/docker/include/scripts/init-var-solr b/solr/docker/scripts/init-var-solr
similarity index 100%
rename from solr/docker/include/scripts/init-var-solr
rename to solr/docker/scripts/init-var-solr
diff --git a/solr/docker/include/scripts/precreate-core b/solr/docker/scripts/precreate-core
similarity index 100%
rename from solr/docker/include/scripts/precreate-core
rename to solr/docker/scripts/precreate-core
diff --git a/solr/docker/include/scripts/run-initdb b/solr/docker/scripts/run-initdb
similarity index 100%
rename from solr/docker/include/scripts/run-initdb
rename to solr/docker/scripts/run-initdb
diff --git a/solr/docker/include/scripts/solr-create b/solr/docker/scripts/solr-create
similarity index 100%
rename from solr/docker/include/scripts/solr-create
rename to solr/docker/scripts/solr-create
diff --git a/solr/docker/include/scripts/solr-demo b/solr/docker/scripts/solr-demo
similarity index 100%
rename from solr/docker/include/scripts/solr-demo
rename to solr/docker/scripts/solr-demo
diff --git a/solr/docker/include/scripts/solr-fg b/solr/docker/scripts/solr-fg
similarity index 100%
rename from solr/docker/include/scripts/solr-fg
rename to solr/docker/scripts/solr-fg
diff --git a/solr/docker/include/scripts/solr-foreground b/solr/docker/scripts/solr-foreground
similarity index 100%
rename from solr/docker/include/scripts/solr-foreground
rename to solr/docker/scripts/solr-foreground
diff --git a/solr/docker/include/scripts/solr-precreate b/solr/docker/scripts/solr-precreate
similarity index 100%
rename from solr/docker/include/scripts/solr-precreate
rename to solr/docker/scripts/solr-precreate
diff --git a/solr/docker/include/scripts/start-local-solr b/solr/docker/scripts/start-local-solr
similarity index 100%
rename from solr/docker/include/scripts/start-local-solr
rename to solr/docker/scripts/start-local-solr
diff --git a/solr/docker/include/scripts/stop-local-solr b/solr/docker/scripts/stop-local-solr
similarity index 100%
rename from solr/docker/include/scripts/stop-local-solr
rename to solr/docker/scripts/stop-local-solr
diff --git a/solr/docker/include/scripts/wait-for-solr.sh b/solr/docker/scripts/wait-for-solr.sh
similarity index 100%
rename from solr/docker/include/scripts/wait-for-solr.sh
rename to solr/docker/scripts/wait-for-solr.sh
diff --git a/solr/docker/include/scripts/wait-for-zookeeper.sh b/solr/docker/scripts/wait-for-zookeeper.sh
similarity index 100%
rename from solr/docker/include/scripts/wait-for-zookeeper.sh
rename to solr/docker/scripts/wait-for-zookeeper.sh
diff --git a/solr/docker/tests/cases/gosu/test.sh b/solr/docker/tests/cases/gosu/test.sh
index ad9444e..29e411f 100755
--- a/solr/docker/tests/cases/gosu/test.sh
+++ b/solr/docker/tests/cases/gosu/test.sh
@@ -59,7 +59,7 @@
 container_cleanup "$container_name"
 
 # chown it back
-docker run --rm --user 0:0 -d -e VERBOSE=yes \
+docker run --rm --user 0:0 -e VERBOSE=yes \
   -v "$myvarsolr:/myvarsolr" "$tag" \
   bash -c "chown -R $(id -u):$(id -g) /myvarsolr; ls -ld /myvarsolr"
 
diff --git a/solr/packaging/build.gradle b/solr/packaging/build.gradle
index 3b242ad..27e8e3a 100644
--- a/solr/packaging/build.gradle
+++ b/solr/packaging/build.gradle
@@ -52,7 +52,6 @@
    ":solr:contrib:langid",
    ":solr:contrib:ltr",
    ":solr:contrib:prometheus-exporter",
-   ":solr:contrib:scripting"
   ].each { contribName ->
     distSolr project(contribName)
     contrib  project(path: contribName, configuration: "packaging")
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
index 14f094d..8fea9ca 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
@@ -83,8 +83,6 @@
 
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
 
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-scripting-\d.*\.jar" />
-
   <!-- an exact 'path' can be used instead of a 'dir' to specify a
        specific jar file.  This will cause a serious error to be logged
        if it can't be loaded.
@@ -381,11 +379,11 @@
   <query>
 
     <!-- Maximum number of clauses allowed when parsing a boolean query string.
-
+         
          This limit only impacts boolean queries specified by a user as part of a query string,
          and provides per-collection controls on how complex user specified boolean queries can
          be.  Query strings that specify more clauses than this will result in an error.
-
+         
          If this per-collection limit is greater than the global `maxBooleanClauses` limit
          specified in `solr.xml`, it will have no effect, as that setting also limits the size
          of user specified boolean queries.
@@ -658,9 +656,6 @@
          enableRemoteStreaming - enables use of the stream.file
          and stream.url parameters for specifying remote streams.
 
-         enableStreamBody - This attribute controls whether streaming
-         content from the HTTP parameter stream.body is allowed.
-
          multipartUploadLimitInKB - specifies the max size (in KiB) of
          Multipart File Uploads that Solr will allow in a Request.
 
@@ -679,12 +674,12 @@
          *** WARNING ***
          Before enabling remote streaming, you should make sure your
          system has authentication enabled.
-       -->
-    <requestParsers enableRemoteStreaming="true"
-                    enableStreamBody="true"
+
+    <requestParsers enableRemoteStreaming="false"
                     multipartUploadLimitInKB="-1"
                     formdataUploadLimitInKB="-1"
                     addHttpRequestToContext="false"/>
+      -->
 
     <!-- HTTP Caching
 
@@ -1278,17 +1273,19 @@
 
     This example hooks in an update processor implemented using JavaScript.
 
-    See more about script update processor at https://lucene.apache.org/solr/guide/script-update-processor.html
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
   -->
-  <updateRequestProcessorChain name="script">
-    <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-      <str name="script">update-script.js</str>
-      <lst name="params">
-        <str name="config_param">example config parameter</str>
-      </lst>
-    </processor>
-    <processor class="solr.RunUpdateProcessorFactory" />
-  </updateRequestProcessorChain>
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
 
   <!-- Response Writers
 
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/update-script.js b/solr/server/solr/configsets/sample_techproducts_configs/conf/update-script.js
index bd36118..49b07f9 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/update-script.js
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/update-script.js
@@ -4,14 +4,14 @@
   In order for this to be executed, it must be properly wired into solrconfig.xml; by default it is commented out in
   the example solrconfig.xml and must be uncommented to be enabled.
 
-  See https://lucene.apache.org/solr/guide/script-update-processor.html for more details.
+  See http://wiki.apache.org/solr/ScriptUpdateProcessor for more details.
 */
 
 function processAdd(cmd) {
 
   doc = cmd.solrDoc;  // org.apache.solr.common.SolrInputDocument
   id = doc.getFieldValue("id");
-  logger.warn("update-script#processAdd: id=" + id);  // WARN level messages will show up in Solr Admin Logging UI
+  logger.info("update-script#processAdd: id=" + id);
 
 // Set a field value:
 //  doc.setField("foo_s", "whatever");
diff --git a/solr/solr-ref-guide/src/configsets-api.adoc b/solr/solr-ref-guide/src/configsets-api.adoc
index d5fd3dc..ead84f1 100644
--- a/solr/solr-ref-guide/src/configsets-api.adoc
+++ b/solr/solr-ref-guide/src/configsets-api.adoc
@@ -88,7 +88,7 @@
 A configset is uploaded in a "trusted" mode if authentication is enabled and the upload operation is performed as an authenticated request. Without authentication, a configset is uploaded in an "untrusted" mode. Upon creation of a collection using an "untrusted" configset, the following functionality will not work:
 
 * The XSLT transformer (`tr` parameter) cannot be used at request processing time.
-* If specified in the configset, the ScriptUpdateProcessorFactory will not initialize.
+* If specified in the configset, the StatelessScriptUpdateProcessor will not initialize.
 * Collections won't initialize if <lib> directives are used in the configset. (Note: Libraries added to Solr's classpath don't need the <lib> directive)
 
 If you use any of these parameters or features, you must have enabled security features in your Solr installation and you must upload the configset as an authenticated user.
diff --git a/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc b/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
index 4e83f0e..f1f0fa2 100644
--- a/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
+++ b/solr/solr-ref-guide/src/configuring-solrconfig-xml.adoc
@@ -8,7 +8,6 @@
     query-settings-in-solrconfig, \
     requestdispatcher-in-solrconfig, \
     update-request-processors, \
-    script-update-processor, \    
     codec-factory
 
 // Licensed to the Apache Software Foundation (ASF) under one
diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
index e554c15..60ab785 100644
--- a/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
+++ b/solr/solr-ref-guide/src/major-changes-in-solr-9.adoc
@@ -144,9 +144,6 @@
 
 * SOLR-14972: The default port of prometheus exporter has changed from 9983 to 8989, so you may need to adjust your configuration after upgrade.
 
-* SOLR-14067: StatelessScriptUpdateProcessorFactory moved to it's own /contrib/scripting/ package instead
- of shipping as part of Solr due to security concerns.  Renamed to ScriptUpdateProcessorFactory for simpler name.
-
 === Upgrade Prerequisites in Solr 9
 
 * Upgrade all collections in stateFormat=1 to stateFormat=2 *before* upgrading to Solr 9, as Solr 9 does not support the
diff --git a/solr/solr-ref-guide/src/script-update-processor.adoc b/solr/solr-ref-guide/src/script-update-processor.adoc
deleted file mode 100644
index 149f201..0000000
--- a/solr/solr-ref-guide/src/script-update-processor.adoc
+++ /dev/null
@@ -1,286 +0,0 @@
-= Script Update Processor
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The {solr-javadocs}/contrib/scripting/org/apache/solr/scripting/update/ScriptUpdateProcessorFactory.html[ScriptUpdateProcessorFactory] allows Java scripting engines to be used
-during Solr document update processing, allowing dramatic flexibility in
-expressing custom document processing logic before being indexed.  It has hooks to the
-commit, delete, rollback, etc indexing actions, however add is the most common usage.
-It is implemented as an UpdateProcessor to be placed in an UpdateChain.
-
-TIP: This used to be known as the _StatelessScriptingUpdateProcessor_ and was renamed to clarify the key aspect of this update processor is it enables scripting.
-
-The script can be written in any scripting language supported by your JVM (such
-as JavaScript), and executed dynamically so no pre-compilation is necessary.
-
-WARNING: Being able to run a script of your choice as part of the indexing pipeline is a really powerful tool, that I sometimes call the
-_Get out of jail free_ card because you can solve some problems this way that you can't in any other way.  However, you are introducing some
-potential security vulnerabilities.
-
-== Installing the ScriptingUpdateProcessor and Scripting Engines
-
-The scripting update processor lives in the contrib module `/contrib/scripting`, and you need to explicitly add it to your Solr setup.
-
-Java 11 and previous versions come with a JavaScript engine called Nashorn, but Java 12 will require you to add your own JavaScript engine.   Other supported scripting engines like
-JRuby, Jython, Groovy, all require you to add JAR files.
-
-Learn more about adding the `dist/solr-scripting-*.jar` file, and any other needed JAR files (depending on your scripting engine) into Solr's <<libs.adoc#lib-directories,Lib Directories>>.
-
-== Configuration
-
-[source,xml]
-----
-<updateRequestProcessorChain name="script">
-   <processor class="org.apache.solr.scripting.update.ScriptUpdateProcessorFactory">
-     <str name="script">update-script.js</str>
-   </processor>
-   <!--  optional parameters passed to script
-     <lst name="params">
-       <str name="config_param">example config parameter</str>
-     </lst>
-   -->
-   <processor class="solr.LogUpdateProcessorFactory" />
-   <processor class="solr.RunUpdateProcessorFactory" />
- </updateRequestProcessorChain>
-----
-
-NOTE: The processor supports the defaults/appends/invariants concept for its config.
-However, it is also possible to skip this level and configure the parameters directly underneath the `<processor>` tag.
-
-Below follows a list of each configuration parameters and their meaning:
-
-`script`::
-The script file name. The script file must be placed in the `conf/ directory.
-There can be one or more "script" parameters specified; multiple scripts are executed in the order specified.
-
-`engine`::
-Optionally specifies the scripting engine to use. This is only needed if the extension
-of the script file is not a standard mapping to the scripting engine. For example, if your
-script file was coded in JavaScript but the file name was called `update-script.foo`,
-use "javascript" as the engine name.
-
-`params`::
-Optional parameters that are passed into the script execution context. This is
-specified as a named list (`<lst>`) structure with nested typed parameters. If
-specified, the script context will get a "params" object, otherwise there will be no "params" object available.
-
-
-== Script execution context
-
-Every script has some variables provided to it.
-
-`logger`::
-Logger (org.slf4j.Logger) instance. This is useful for logging information from the script.
-
-`req`::
-{solr-javadocs}/core/org/apache/solr/request/SolrQueryRequest.html[SolrQueryRequest] instance.
-
-`rsp`::
-{solr-javadocs}/core/org/apache/solr/response/SolrQueryResponse.html[SolrQueryResponse] instance.
-
-`params`::
-The "params" object, if any specified, from the configuration.
-
-== Examples
-
-The `processAdd()` and the other script methods can return false to skip further
-processing of the document. All methods must be defined, though generally the
-`processAdd()` method is where the action is.
-
-Here's a URL that works with the techproducts example setup demonstrating specifying
-the "script" update chain: `http://localhost:8983/solr/techproducts/update?commit=true&stream.contentType=text/csv&fieldnames=id,description&stream.body=1,foo&update.chain=script`
-which logs the following:
-
-[source,text]
-----
-INFO: update-script#processAdd: id=1
-----
-
-You can see the message recorded in the Solr logging UI.
-
-=== Javascript
-
-Note: There is a JavaScript example `update-script.js` as part of the `techproducts` configset.
-Check `solrconfig.xml` and uncomment the update request processor definition to enable this feature.
-
-[source,javascript]
-----
-function processAdd(cmd) {
-
-  doc = cmd.solrDoc;  // org.apache.solr.common.SolrInputDocument
-  id = doc.getFieldValue("id");
-  logger.info("update-script#processAdd: id=" + id);
-
-// Set a field value:
-//  doc.setField("foo_s", "whatever");
-
-// Get a configuration parameter:
-//  config_param = params.get('config_param');  // "params" only exists if processor configured with <lst name="params">
-
-// Get a request parameter:
-// some_param = req.getParams().get("some_param")
-
-// Add a field of field names that match a pattern:
-//   - Potentially useful to determine the fields/attributes represented in a result set, via faceting on field_name_ss
-//  field_names = doc.getFieldNames().toArray();
-//  for(i=0; i < field_names.length; i++) {
-//    field_name = field_names[i];
-//    if (/attr_.*/.test(field_name)) { doc.addField("attribute_ss", field_names[i]); }
-//  }
-
-}
-
-function processDelete(cmd) {
-  // no-op
-}
-
-function processMergeIndexes(cmd) {
-  // no-op
-}
-
-function processCommit(cmd) {
-  // no-op
-}
-
-function processRollback(cmd) {
-  // no-op
-}
-
-function finish() {
-  // no-op
-}
-----
-
-=== Ruby
-Ruby support is implemented via the https://www.jruby.org/[JRuby] project.
-To use JRuby as the scripting engine, add `jruby.jar` to Solr.
-
-Here's an example of a JRuby update processing script (note that all variables passed in require prefixing with `$`, such as `$logger`):
-
-[source,ruby]
-----
-def processAdd(cmd)
-  doc = cmd.solrDoc  # org.apache.solr.common.SolrInputDocument
-  id = doc.getFieldValue('id')
-
-  $logger.info "update-script#processAdd: id=#{id}"
-
-  doc.setField('source_s', 'ruby')
-
-  $logger.info "update-script#processAdd: config_param=#{$params.get('config_param')}"
-end
-
-def processDelete(cmd)
-  # no-op
-end
-
-def processMergeIndexes(cmd)
-  # no-op
-end
-
-def processCommit(cmd)
-  # no-op
-end
-
-def processRollback(cmd)
-  # no-op
-end
-
-def finish()
-  # no-op
-end
-----
-
-==== Known issues
-
-The following in JRuby does not work as expected for some reason, though it does work properly in JavaScript:
-
-[source,ruby]
-----
-#  $logger.info "update-script#processAdd: request_param=#{$req.params.get('request_param')}"
-#  $rsp.add('script_processed',id)
-----
-
-=== Groovy
-
-Add JARs from a Groovy distro's `lib/` directory to Solr.  All JARs from
-Groovy's distro probably aren't required, but more than just the main `groovy.jar`
-file is needed (at least when this was tested using Groovy 2.0.6)
-
-[source,groovy]
-----
-def processAdd(cmd) {
-  doc = cmd.solrDoc  // org.apache.solr.common.SolrInputDocument
-  id = doc.getFieldValue('id')
-
-  logger.info "update-script#processAdd: id=" + id
-
-  doc.setField('source_s', 'groovy')
-
-  logger.info "update-script#processAdd: config_param=" + params.get('config_param')
-
-  logger.info "update-script#processAdd: request_param=" + req.params.get('request_param')
-  rsp.add('script_processed',id)
-}
-
-def processDelete(cmd) {
- //  no-op
-}
-
-def processMergeIndexes(cmd) {
- // no-op
-}
-
-def processCommit(cmd) {
- //  no-op
-}
-
-def processRollback(cmd) {
- // no-op
-}
-
-def finish() {
- // no-op
-}
-----
-
-=== Python
-Python support is implemented via the https://www.jython.org/[Jython] project.
-Add the *standalone* `jython.jar` (the JAR that contains all the dependencies) into Solr.
-
-[source,python]
-----
-def processAdd(cmd):
-  doc = cmd.solrDoc
-  id = doc.getFieldValue("id")
-  logger.info("update-script#processAdd: id=" + id)
-
-def processDelete(cmd):
-    logger.info("update-script#processDelete")
-
-def processMergeIndexes(cmd):
-    logger.info("update-script#processMergeIndexes")
-
-def processCommit(cmd):
-    logger.info("update-script#processCommit")
-
-def processRollback(cmd):
-    logger.info("update-script#processRollback")
-
-def finish():
-    logger.info("update-script#finish")
-----
diff --git a/solr/solr-ref-guide/src/streaming-expressions.adoc b/solr/solr-ref-guide/src/streaming-expressions.adoc
index 6d8c3f9..e988bb7 100644
--- a/solr/solr-ref-guide/src/streaming-expressions.adoc
+++ b/solr/solr-ref-guide/src/streaming-expressions.adoc
@@ -83,7 +83,7 @@
 Note the last tuple in the above example stream is `{"EOF":true,"RESPONSE_TIME":33}`. The `EOF` indicates the end of the stream. To process the JSON response, you'll need to use a streaming JSON implementation because streaming expressions are designed to return the entire result set which may have millions of records. In your JSON client you'll need to iterate each doc (tuple) and check for the EOF tuple to determine the end of stream.
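+
+As a minimal sketch of that client-side loop, shown here with the SolrJ `SolrStream` class rather than a hand-rolled JSON parser (the collection name and expression are placeholders):
+
+[source,java]
+----
+import java.io.IOException;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.SolrStream;
+import org.apache.solr.common.params.ModifiableSolrParams;
+
+public class StreamingClientSketch {
+  public static void main(String[] args) throws IOException {
+    // Send a streaming expression to the /stream handler of a collection
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("expr", "search(collection1, q=\"*:*\", fl=\"id\", sort=\"id asc\")");
+    params.set("qt", "/stream");
+
+    SolrStream stream = new SolrStream("http://localhost:8983/solr/collection1", params);
+    try {
+      stream.open();
+      while (true) {
+        Tuple tuple = stream.read();
+        if (tuple.EOF) {  // the {"EOF":true, ...} tuple marks the end of the stream
+          break;
+        }
+        System.out.println(tuple.getString("id"));
+      }
+    } finally {
+      stream.close();
+    }
+  }
+}
+----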
 
 
-== Elements of the Lanaguage
+== Elements of the Language
 
 === Stream Sources
 
diff --git a/solr/solr-ref-guide/src/update-request-processors.adoc b/solr/solr-ref-guide/src/update-request-processors.adoc
index aeee353..7e9dce8 100644
--- a/solr/solr-ref-guide/src/update-request-processors.adoc
+++ b/solr/solr-ref-guide/src/update-request-processors.adoc
@@ -283,7 +283,7 @@
 
 {solr-javadocs}/core/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.html[SignatureUpdateProcessorFactory]:: Uses a defined set of fields to generate a hash "signature" for the document. Useful for only indexing one copy of "similar" documents.
 
-{solr-javadocs}/contrib/scripting/org/apache/solr/scripting/update/ScriptUpdateProcessorFactory.html[ScriptUpdateProcessorFactory]:: An processor that enables the use of update processors implemented as scripts.  Learn more at the <<script-update-processor.adoc#script-update-processor,script update processor>> page.
+{solr-javadocs}/core/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.html[StatelessScriptUpdateProcessorFactory]:: An update request processor factory that enables the use of update processors implemented as scripts.
 
 {solr-javadocs}/core/org/apache/solr/update/processor/TemplateUpdateProcessorFactory.html[TemplateUpdateProcessorFactory]:: Allows adding new fields to documents based on a template pattern. This update processor can also be used at runtime (without defining it in `solrconfig.xml`), see the section <<templateupdateprocessorfactory>> below.
 
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
index fee3b7a..c38f397 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/CollectionAdminParams.java
@@ -115,4 +115,9 @@
    * for the add replica API. If set to true, a valid "node" should be specified.
    */
   String SKIP_NODE_ASSIGNMENT = "skipNodeAssignment";
+
+  /**
+   * Prefix for arbitrary collection or replica properties.
+   */
+  String PROPERTY_PREFIX = "property.";
 }
diff --git a/solr/test-framework/src/java/org/apache/solr/update/processor/UpdateProcessorTestBase.java b/solr/test-framework/src/java/org/apache/solr/update/processor/UpdateProcessorTestBase.java
index 21c4972..d3aa979 100644
--- a/solr/test-framework/src/java/org/apache/solr/update/processor/UpdateProcessorTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/update/processor/UpdateProcessorTestBase.java
@@ -140,7 +140,7 @@
   /**
    * Convenience method for building up SolrInputDocuments
    */
-  protected final SolrInputDocument doc(SolrInputField... fields) {
+  final SolrInputDocument doc(SolrInputField... fields) {
     SolrInputDocument d = new SolrInputDocument();
     for (SolrInputField f : fields) {
       d.put(f.getName(), f);
@@ -162,7 +162,7 @@
   /**
    * Convenience method for building up SolrInputFields with default boost
    */
-  protected final SolrInputField f(String name, Object... values) {
+  final SolrInputField f(String name, Object... values) {
     return field(name, values);
   }
 }
diff --git a/versions.lock b/versions.lock
index 5727718..b738b79 100644
--- a/versions.lock
+++ b/versions.lock
@@ -204,8 +204,6 @@
 org.apache.kerby:kerb-simplekdc:1.0.1 (1 constraints: 0405f135)
 org.apache.kerby:kerby-kdc:1.0.1 (1 constraints: 0405f135)
 org.apache.logging.log4j:log4j-1.2-api:2.13.2 (1 constraints: 3a053a3b)
-org.apache.lucene:lucene-codecs:8.6.3 (1 constraints: 13052836)
-org.apache.lucene:lucene-core:8.6.3 (1 constraints: 7f0d022f)
 org.asciidoctor:asciidoctorj:1.6.2 (1 constraints: 0b050436)
 org.asciidoctor:asciidoctorj-api:1.6.2 (1 constraints: e30cfb0d)
 org.hsqldb:hsqldb:2.4.0 (1 constraints: 08050136)