mergin
diff --git a/.gitignore b/.gitignore
index 15a2736..f120ec4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,7 +8,6 @@
 lib
 test-lib
 /*~
-/velocity.log
 /build.properties
 /.idea
 lucene/**/*.iml
diff --git a/build.gradle b/build.gradle
index 25a6185..c677384 100644
--- a/build.gradle
+++ b/build.gradle
@@ -86,7 +86,7 @@
   scriptDepVersions = [
       "apache-rat": "0.11",
       "ecj": "3.19.0",
-      "javacc": "5.0",
+      "javacc": "7.0.4",
       "jflex": "1.7.0",
       "jgit": "5.3.0.201903130848-r",
       "flexmark": "0.61.24",
diff --git a/gradle/documentation/render-javadoc.gradle b/gradle/documentation/render-javadoc.gradle
index a34d8a1..3b0b8d4 100644
--- a/gradle/documentation/render-javadoc.gradle
+++ b/gradle/documentation/render-javadoc.gradle
@@ -261,13 +261,6 @@
   }
 }
 
-configure(project(":solr:contrib:velocity")) {
-  project.tasks.withType(RenderJavadocTask) {
-    // TODO: clean up split packages
-    javadocMissingIgnore = [ "org.apache.solr.response" ]
-  }
-}
-
 configure(project(":solr:contrib:analysis-extras")) {
   project.tasks.withType(RenderJavadocTask) {
     // TODO: clean up split packages
diff --git a/gradle/generation/javacc.gradle b/gradle/generation/javacc.gradle
index ff93117..90eed8d 100644
--- a/gradle/generation/javacc.gradle
+++ b/gradle/generation/javacc.gradle
@@ -1,3 +1,6 @@
+import java.nio.charset.Charset
+import java.util.function.Function
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -30,18 +33,279 @@
     description "Regenerate sources for corresponding javacc grammar files."
     group "generation"
 
-    dependsOn ":lucene:queryparser:javaccParserClassic"
-    dependsOn ":lucene:queryparser:javaccParserSurround"
-    dependsOn ":lucene:queryparser:javaccParserFlexible"
-    dependsOn ":solr:core:javaccSolrParser"
+    dependsOn allprojects.collect { prj -> prj.tasks.withType(JavaCCTask) }
   }
 }
 
+/**
+ * Utility function to read a file, apply changes to its content and write it back.
+ */
+def modifyFile = { File path, Function<String, String> modify ->
+  Function<String, String> normalizeEols = { text -> text.replace("\r\n", "\n") }
+  modify = normalizeEols.andThen(modify).andThen(normalizeEols)
+
+  String original = path.getText("UTF-8")
+  String modified = modify.apply(original)
+  if (!original.equals(modified)) {
+    path.write(modified, "UTF-8")
+  }
+}
+
+def commonCleanups = { FileTree generatedFiles ->
+  // This is a minor typo in a comment that nonetheless people have hand-corrected in the past.
+  generatedFiles.matching({ include "CharStream.java" }).each {file ->
+    modifyFile(file, { text ->
+      return text.replace(
+          "implemetation",
+          "implementation");
+    })
+  }
+
+  generatedFiles.each {file ->
+    modifyFile(file, { text ->
+      // Normalize EOLs and tabs (EOLs are a side-effect of modifyFile).
+      text = text.replace("\t", "    ");
+      text = text.replaceAll("JavaCC - OriginalChecksum=[^*]+", "(filtered)")
+      text = text.replace("StringBuffer", "StringBuilder")
+      return text
+    })
+  }
+
+  generatedFiles.matching({ include "*TokenManager.java" }).each { file ->
+    modifyFile(file, { text ->
+      // Eliminates redundant cast message.
+      text = text.replace(
+          "int hiByte = (int)(curChar >> 8);",
+          "int hiByte = curChar >> 8;")
+      // Access to forbidden APIs.
+      text = text.replace(
+          "public  java.io.PrintStream debugStream = System.out;",
+          "// (debugStream omitted).")
+      text = text.replace(
+          "public  void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }",
+          "// (setDebugStream omitted).")
+      return text
+    })
+  }
+}
+
+configure(project(":lucene:queryparser")) {
+  task javaccParserClassic(type: JavaCCTask) {
+    description "Regenerate classic query parser from lucene/queryparser/classic/QueryParser.jj"
+    group "generation"
+
+    javaccFile = file('src/java/org/apache/lucene/queryparser/classic/QueryParser.jj')
+
+    afterGenerate << commonCleanups
+    afterGenerate << { FileTree generatedFiles ->
+      generatedFiles.matching { include "QueryParser.java" }.each { file ->
+        modifyFile(file, { text ->
+          text = text.replace(
+              "public QueryParser(CharStream ",
+              "protected QueryParser(CharStream ")
+          text = text.replace(
+              "public QueryParser(QueryParserTokenManager ",
+              "protected QueryParser(QueryParserTokenManager ")
+          text = text.replace(
+              "new java.util.ArrayList<int[]>",
+              "new java.util.ArrayList<>")
+          return text
+        })
+      }
+
+      generatedFiles.matching { include "*TokenManager.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Remove redundant imports.
+          text = text.replaceAll(
+              /(?m)^import .+/,
+              "")
+          return text
+        })
+      }
+    }
+  }
+
+  task javaccParserSurround(type: JavaCCTask) {
+    description "Regenerate surround query parser from lucene/queryparser/surround/parser/QueryParser.jj"
+    group "generation"
+
+    javaccFile = file('src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj')
+
+    afterGenerate << commonCleanups
+    afterGenerate << { FileTree generatedFiles ->
+      generatedFiles.matching { include "QueryParser.java" }.each { file ->
+        modifyFile(file, { text ->
+          text = text.replace(
+              "import org.apache.lucene.analysis.TokenStream;",
+              "")
+          text = text.replace(
+              "new java.util.ArrayList<int[]>",
+              "new java.util.ArrayList<>")
+          return text
+        })
+      }
+
+      generatedFiles.matching { include "*TokenManager.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Remove redundant imports.
+          text = text.replaceAll(
+              /(?m)^import .+/,
+              "")
+          return text
+        })
+      }
+    }
+  }
+
+  task javaccParserFlexible(type: JavaCCTask) {
+    description "Regenerate Flexible query parser from queryparser/flexible/standard/parser/StandardSyntaxParser.jj"
+    group "generation"
+
+    javaccFile = file('src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj')
+
+    afterGenerate << commonCleanups
+    afterGenerate << { FileTree generatedFiles ->
+      generatedFiles.matching { include "ParseException.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Modify constructor.
+          text = text.replace(
+              "class ParseException extends Exception",
+              "class ParseException extends QueryNodeParseException")
+
+          // Modify imports.
+          text = text.replace(
+              "package org.apache.lucene.queryparser.flexible.standard.parser;", '''\
+          package org.apache.lucene.queryparser.flexible.standard.parser;
+
+          import org.apache.lucene.queryparser.flexible.messages.*;
+          import org.apache.lucene.queryparser.flexible.core.*;
+          import org.apache.lucene.queryparser.flexible.core.messages.*;
+          ''')
+
+          // Modify constructors and code bits
+          text = text.replaceAll(
+              /(?s)[ ]*public ParseException\(Token currentTokenVal[^}]+[}]/, '''\
+          public ParseException(Token currentTokenVal,
+            int[][] expectedTokenSequencesVal, String[] tokenImageVal) 
+          {
+            super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(
+            currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));
+            this.currentToken = currentTokenVal;
+            this.expectedTokenSequences = expectedTokenSequencesVal;
+            this.tokenImage = tokenImageVal;
+          }
+          ''')
+
+          text = text.replaceAll(
+              /(?s)[ ]*public ParseException\(String message\)[^}]+[}]/, '''\
+          public ParseException(Message message) 
+          {
+            super(message);
+          }
+          ''')
+
+          text = text.replaceAll(
+              /(?s)[ ]*public ParseException\(\)[^}]+[}]/, '''\
+          public ParseException() 
+          {
+            super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, "Error"));
+          }
+          ''')
+          return text
+        })
+      }
+
+      generatedFiles.matching { include "StandardSyntaxParser.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Remove redundant cast
+          text = text.replace(
+              "new java.util.ArrayList<int[]>",
+              "new java.util.ArrayList<>")
+          text = text.replace(
+              "new ArrayList<QueryNode>()",
+              "new ArrayList<>()")
+          text = text.replace(
+              "Collections.<QueryNode> singletonList",
+              "Collections.singletonList")
+          return text
+        })
+      }
+
+      generatedFiles.matching { include "StandardSyntaxParserTokenManager.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Remove redundant imports.
+          text = text.replaceAll(
+              /(?m)^import .+/,
+              "")
+          return text
+        })
+      }
+    }
+  }
+
+  task javacc() {
+    description "Regenerate query parsers (javacc syntax definitions)."
+    group "generation"
+
+    dependsOn javaccParserClassic
+    dependsOn javaccParserSurround
+    dependsOn javaccParserFlexible
+  }
+}
+
+configure(project(":solr:core")) {
+  task javacc(type: JavaCCTask) {
+    description "Regenerate Solr query parser"
+    group "generation"
+
+    javaccFile = file('src/java/org/apache/solr/parser/QueryParser.jj')
+
+
+    afterGenerate << commonCleanups
+    afterGenerate << { FileTree generatedFiles ->
+      generatedFiles.matching { include "QueryParser.java" }.each { file ->
+        modifyFile(file, { text ->
+          text = text.replace(
+              "public QueryParser(CharStream ",
+              "protected QueryParser(CharStream ")
+          text = text.replace(
+              "public QueryParser(QueryParserTokenManager ",
+              "protected QueryParser(QueryParserTokenManager ")
+          text = text.replace(
+              "final private LookaheadSuccess jj_ls =",
+              "static final private LookaheadSuccess jj_ls =")
+          return text
+        })
+      }
+
+      generatedFiles.matching { include "*TokenManager.java" }.each { file ->
+        modifyFile(file, { text ->
+          // Remove redundant imports.
+          text = text.replaceAll(
+              /(?m)^import .+/,
+              "")
+          return text
+        })
+      }
+    }
+  }
+}
+
+
+
 // We always regenerate, no need to declare outputs.
 class JavaCCTask extends DefaultTask {
   @Input
   File javaccFile
 
+  /**
+   * Apply closures to all generated files before they're copied back
+   * to mainline code.
+   */
+  @Optional
+  @Input
+  List<Closure<FileTree>> afterGenerate = new ArrayList<>()
+
   JavaCCTask() {
     dependsOn(project.rootProject.configurations.javacc)
   }
@@ -49,18 +313,17 @@
   @TaskAction
   def generate() {
     if (!javaccFile || !javaccFile.exists()) {
-      throw new RuntimeException("JavaCC input file does not exist: ${javaccFile}")
+      throw new GradleException("Input file does not exist: ${javaccFile}")
     }
 
-    // Remove previous files so we can regenerate them. javacc doesn't want to overwrite
-    // locally modified files.
-    def parentDir = javaccFile.parentFile
-    def toDelete = project.fileTree(parentDir, {
-      include "**/*.java"
-    }).findAll { file -> file.getText("UTF-8").contains("Generated By:JavaCC") }
-    project.delete(toDelete)
+    // Run javacc generation into temporary folder so that we know all the generated files
+    // and can post-process them easily.
+    def tempDir = this.getTemporaryDir()
+    tempDir.mkdirs()
+    project.delete project.fileTree(tempDir, { include: "**/*.java" })
 
-    logger.lifecycle("Regenerating JavaCC:\n  from: ${javaccFile}\n    to: ${parentDir}")
+    def targetDir = javaccFile.parentFile
+    logger.lifecycle("Regenerating JavaCC:\n  from: ${javaccFile}\n    to: ${targetDir}")
 
     def output = new ByteArrayOutputStream()
     def result = project.javaexec {
@@ -74,7 +337,7 @@
 
       main = "org.javacc.parser.Main"
       args += [
-          "-OUTPUT_DIRECTORY=${parentDir}",
+          "-OUTPUT_DIRECTORY=${tempDir}",
           javaccFile
       ]
     }
@@ -84,302 +347,22 @@
       throw new GradleException("JavaCC failed to compile ${javaccFile}, here is the compilation output:\n${output}")
     }
 
-    // Cleanup common to more than one javacc invocation.
-    //
-    // This is a minor typo in a comment that nontheless people have hand-corrected in the past.
-    ant.replace(file: "${parentDir}/CharStream.java",
-        token: "implemetation",
-        value: "implementation",
-        encoding: "UTF-8")
-
-    // StringBuffer -> StringBuilder
-    ant.replace(token: "StringBuffer",
-        value: "StringBuilder",
-        encoding: "UTF-8") {
-      ant.fileset(dir: parentDir, includes: '*.java') {
-        ant.containsregexp(expression: "Generated By:JavaCC:")
-      }
+    // Make sure we don't have warnings.
+    if (output.toString(Charset.defaultCharset()).contains("Warning:")) {
+      throw new GradleException("JavaCC emitted warnings for ${javaccFile}, here is the compilation output:\n${output}")
     }
 
-    // Eliminates redundant cast message
-    ant.replace(token: "int hiByte = (int)(curChar >> 8);",
-        value: "int hiByte = curChar >> 8;",
-        encoding: "UTF-8") {
-      ant.fileset(dir: parentDir, includes: "*TokenManager.java")
+    // Apply any custom modifications.
+    def generatedFiles = project.fileTree(tempDir)
+
+    afterGenerate.each {closure ->
+      closure.call(generatedFiles)
     }
 
-    // So precommit passes
-    ant.replaceregexp(match: "/\\*\\* Debug output.*?Set debug output.*?ds; }",
-        replace: '',
-        flags: 's',
-        encoding: 'UTF-8') {
-      ant.fileset(dir: parentDir, includes: "*TokenManager.java")
-    }
-
-    // Correct line endings for Windows.
-    project.ant.fixcrlf(srcDir: parentDir,
-        includes: "*.java",
-        encoding: "UTF-8",
-        eol: "lf") {
-      ant.containsregexp(expression: "Generated By:JavaCC:")
-    }
-  }
-}
-
-
-configure(project(":lucene:queryparser")) {
-  task javaccParserClassic(type: JavaCCTask) {
-    description "Regenerate classic query parser from lucene/queryparser/classic/QueryParser.jj"
-    group "generation"
-
-    javaccFile = file('src/java/org/apache/lucene/queryparser/classic/QueryParser.jj')
-    def parentDir = javaccFile.parentFile // I'll need this later.
-
-    doLast {
-      // control visibility issues
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "public QueryParser(CharStream ",
-          value: "protected QueryParser(CharStream ",
-          encoding: 'UTF-8')
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "public QueryParser(QueryParserTokenManager ",
-          value: "protected QueryParser(QueryParserTokenManager ",
-          encoding: 'UTF-8')
-
-      // Some redundant casts etc. in queryparser.java
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "new java.util.ArrayList<int[]>",
-          value: "new java.util.ArrayList<>",
-          encoding: 'UTF-8')
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "new java.util.ArrayList<int[]>",
-          value: "new java.util.ArrayList<>",
-          encoding: 'UTF-8')
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "(int)(curChar >> 8);",
-          value: "curChar >> 8;",
-          encoding: 'UTF-8')
-      // Remove unnecessary imports
-      def separator = System.getProperty('line.separator')
-      [/import java\.io\.StringReader;/,
-       /import java\.util\.ArrayList;/,
-       /import java\.util\.Arrays;/,
-       /import java\.util\.HashSet;/,
-       /import java\.util\.List;/,
-       /import java\.util\.Locale;/,
-       /import java\.util\.Set;/,
-       /import org\.apache\.lucene\.analysis\.Analyzer;/,
-       /import org\.apache\.lucene\.document\.DateTools;/,
-       /import org\.apache\.lucene\.search\.BooleanClause;/,
-       /import org\.apache\.lucene\.search\.Query;/,
-       /import org\.apache\.lucene\.search\.TermRangeQuery/,
-       /import org\.apache\.lucene\.search\.TermRangeQuery;/
-      ].each {
-        ant.replaceregexp(file: file("${parentDir}/QueryParserTokenManager.java"),
-            match: "${it}\\s*${separator}",
-            replace: "",
-            encoding: "UTF-8")
-      }
-    }
-  }
-}
-
-configure(project(":lucene:queryparser")) {
-  task javaccParserSurround(type: JavaCCTask) {
-    description "Regenerate surround query parser from lucene/queryparser/surround/parser/QueryParser.jj"
-    group "generation"
-
-    javaccFile = file('src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj')
-    def parentDir = javaccFile.parentFile
-
-    doLast {
-      def separator = System.getProperty('line.separator')
-
-      // Remove unneeded import
-      ant.replaceregexp(match: /import org\.apache\.lucene\.analysis\.TokenStream;\s*${separator}${separator}/,
-          replace: "",
-          encoding: "UTF-8") {
-        ant.fileset(dir: parentDir, includes: "QueryParser.java")
-      }
-
-      // Eliminate compiler warning
-      ant.replace(file: file("${parentDir}/QueryParser.java"),
-          token: "new java.util.ArrayList<int[]>",
-          value: "new java.util.ArrayList<>",
-          encoding: 'UTF-8')
-
-      // There are a bunch of unused imports we need to remove to pass precommit
-      [
-          /import java\.util\.ArrayList;/,
-          /import java\.util\.List;/,
-          /import java\.io\.StringReader;/,
-          /import org\.apache\.lucene\.analysis\.TokenStream;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.SrndQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.FieldsQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.OrQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.AndQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.NotQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.DistanceQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.SrndTermQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.SrndPrefixQuery;/,
-          /import org\.apache\.lucene\.queryparser\.surround\.query\.SrndTruncQuery;/
-      ].each {
-        ant.replaceregexp(file: file("${parentDir}/QueryParserTokenManager.java"),
-            match: "${it}\\s*${separator}",
-            replace: "",
-            encoding: "UTF-8")
-      }
-    }
-  }
-}
-configure(project(":lucene:queryparser")) {
-  task javaccParserFlexible(type: JavaCCTask) {
-    description "Regenerate Flexible query parser from queryparser/flexible/standard/parser/StandardSyntaxParser.jj"
-    group "generation"
-
-    javaccFile = file('src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj')
-    def parentDir = javaccFile.parentFile
-
-    doLast {
-      def lineSeparator = System.lineSeparator()
-
-      // extend the proper class
-      ant.replaceregexp(file: "${parentDir}/ParseException.java",
-          match: "public class ParseException extends Exception",
-          replace: "public class ParseException extends QueryNodeParseException",
-          flags: "g",
-          byline: "false",
-          encoding: 'UTF-8')
-
-      // Import correct classes.
-      ant.replaceregexp(file: "${parentDir}/ParseException.java",
-          match: "package org.apache.lucene.queryparser.flexible.standard.parser;",
-          replace: "package org.apache.lucene.queryparser.flexible.standard.parser;${lineSeparator} ${lineSeparator}" +
-              " import org.apache.lucene.queryparser.flexible.messages.Message;${lineSeparator}" +
-              " import org.apache.lucene.queryparser.flexible.messages.MessageImpl;${lineSeparator}" +
-              " import org.apache.lucene.queryparser.flexible.core.*;${lineSeparator}" +
-              " import org.apache.lucene.queryparser.flexible.core.messages.*;",
-          flags: "g",
-          byline: "false",
-          encoding: 'UTF-8')
-
-      // Fill in c'tor code
-      ant.replaceregexp(file: "${parentDir}/ParseException.java",
-          match: "^  public ParseException\\(Token currentTokenVal.*\$(\\s\\s[^}].*\\n)*  \\}",
-          replace: "  public ParseException(Token currentTokenVal,${lineSeparator}" +
-              "     int[][] expectedTokenSequencesVal, String[] tokenImageVal) {${lineSeparator}" +
-              "     super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(${lineSeparator}" +
-              "     currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));${lineSeparator}" +
-              "     this.currentToken = currentTokenVal;${lineSeparator}" +
-              "     this.expectedTokenSequences = expectedTokenSequencesVal;${lineSeparator}" +
-              "     this.tokenImage = tokenImageVal;${lineSeparator}" +
-              "   }",
-          flags: "gm",
-          byline: "false",
-          encoding: 'UTF-8')
-
-      // Invoke super, use proper c'tor
-      ant.replaceregexp(file: "${parentDir}/ParseException.java",
-          match: "^  public ParseException\\(String message.*\$(\\s\\s[^}].*\\n)*  \\}",
-          replace: "  public ParseException(Message message) {${lineSeparator}" +
-              "     super(message);${lineSeparator}" +
-              "   }",
-          flags: "gm",
-          byline: "false",
-          encoding: 'UTF-8')
-
-      // Invoke super properly
-      ant.replaceregexp(file: "${parentDir}/ParseException.java",
-          match: "^  public ParseException\\(\\).*\$(\\s\\s[^}].*\\n)*  \\}",
-          replace: "  public ParseException() {${lineSeparator}" +
-              "     super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, \"Error\"));${lineSeparator}" +
-              "   }",
-          flags: "gm",
-          byline: "false",
-          encoding: 'UTF-8')
-
-      // Redundant cast warning
-      ant.replace(file: file("${parentDir}/StandardSyntaxParser.java"),
-          token: "new java.util.ArrayList<int[]>",
-          value: "new java.util.ArrayList<>",
-          encoding: 'UTF-8')
-
-      // Remove unused imports.
-      def separator = System.getProperty('line.separator')
-      [
-          /import java.io.StringReader;/,
-          /import java.util.Vector;/,
-          /import java.util.Arrays;/,
-          /import org.apache.lucene.queryparser.flexible.messages.Message;/,
-          /import org.apache.lucene.queryparser.flexible.messages.MessageImpl;/,
-          /import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException;/,
-          /import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.BoostQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.FuzzyQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.standard.nodes.RegexpQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.SlopQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.nodes.QuotedFieldQueryNode;/,
-          /import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser;/,
-          /import org.apache.lucene.queryparser.flexible.standard.nodes.TermRangeQueryNode;/
-      ].each {
-        ant.replaceregexp(file: file("${parentDir}/StandardSyntaxParserTokenManager.java"),
-            match: "${it}\\s*${separator}",
-            replace: "",
-            encoding: "UTF-8")
-      }
-    }
-  }
-}
-configure(project(":solr:core")) {
-  task javaccSolrParser(type: JavaCCTask) {
-    description "Regenerate Solr query parser from solr/parser/QueryParser.jj"
-    group "generation"
-
-    javaccFile = file('src/java/org/apache/solr/parser/QueryParser.jj')
-
-    doLast {
-      def separator = System.getProperty('line.separator')
-      def parentDir = javaccFile.parentFile
-
-      [/import java\.io\.StringReader;/,
-       /import java\.util\.ArrayList;/,
-       /import java\.util\.Arrays;/,
-       /import java\.util\.HashSet;/,
-       /import java\.util\.List;/,
-       /import java\.util\.Set;/,
-       /import org\.apache\.lucene\.analysis\.Analyzer;/,
-       /import org\.apache\.lucene\.search\.BooleanClause;/,
-       /import org\.apache\.lucene\.search\.Query;/,
-       /import org\.apache\.solr\.search\.SyntaxError;/,
-       /import org\.apache\.solr\.search\.QParser;/
-      ].each {
-        ant.replaceregexp(file: file("${parentDir}/QueryParserTokenManager.java"),
-            match: "${it}\\s*${separator}",
-            replace: "",
-            encoding: "UTF-8")
-      }
-
-      ant.replace(file: "${parentDir}/QueryParser.java",
-          token: "public QueryParser(CharStream ",
-          value: "protected QueryParser(CharStream ",
-          encoding: "UTF-8")
-
-      ant.replace(file: "${parentDir}/QueryParser.java",
-          token: "public QueryParser(QueryParserTokenManager ",
-          value: "protected QueryParser(QueryParserTokenManager ",
-          encoding: "UTF-8")
-
-      ant.replace(file: "${parentDir}/QueryParser.java",
-          token: "final private LookaheadSuccess jj_ls =",
-          value: "static final private LookaheadSuccess jj_ls =",
-          encoding: "UTF-8")
+    // Copy back to mainline sources.
+    project.copy {
+      from tempDir
+      into targetDir
     }
   }
 }
diff --git a/gradle/maven/defaults-maven.gradle b/gradle/maven/defaults-maven.gradle
index 570d011..5c260f3 100644
--- a/gradle/maven/defaults-maven.gradle
+++ b/gradle/maven/defaults-maven.gradle
@@ -66,7 +66,6 @@
         ":solr:contrib:langid",
         ":solr:contrib:jaegertracer-configurator",
         ":solr:contrib:prometheus-exporter",
-        ":solr:contrib:velocity",
         ":solr:test-framework",
     ]
   }
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 9282d5a..b18091d 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -132,6 +132,7 @@
   "after". Also redesign numeric comparators to provide skipping functionality
   by default. (Mayya Sharipova, Jim Ferenczi)
 
+* LUCENE-9527: Upgrade javacc to 7.0.4, regenerate query parsers. (Dawid Weiss)
 
 Bug fixes
 
@@ -243,6 +244,9 @@
 * LUCENE-9373: FunctionMatchQuery now accepts a "matchCost" optimization hint.
   (Maxim Glazkov, David Smiley)
 
+* LUCENE-9510: Indexing with an index sort is now faster by not compressing
+  temporary representations of the data. (Adrien Grand)
+
 Bug Fixes
 ---------------------
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
index cd9197f..1258eb7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
@@ -69,7 +69,7 @@
 import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.LongsRef;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
@@ -90,8 +90,8 @@
   private final int numDocs;
   private final boolean merging;
   private final BlockState state;
-  private final long numChunks; // number of compressed blocks written
   private final long numDirtyChunks; // number of incomplete compressed blocks written
+  private final long numDirtyDocs; // cumulative number of missing docs in incomplete chunks
   private boolean closed;
 
   // used by clone
@@ -106,8 +106,8 @@
     this.compressionMode = reader.compressionMode;
     this.decompressor = reader.decompressor.clone();
     this.numDocs = reader.numDocs;
-    this.numChunks = reader.numChunks;
     this.numDirtyChunks = reader.numDirtyChunks;
+    this.numDirtyDocs = reader.numDirtyDocs;
     this.merging = merging;
     this.state = new BlockState();
     this.closed = false;
@@ -187,15 +187,13 @@
       this.indexReader = indexReader;
 
       if (version >= VERSION_META) {
-        numChunks = metaIn.readVLong();
         numDirtyChunks = metaIn.readVLong();
+        numDirtyDocs = metaIn.readVLong();
       } else {
-        fieldsStream.seek(maxPointer);
-        numChunks = fieldsStream.readVLong();
-        numDirtyChunks = fieldsStream.readVLong();
-      }
-      if (numDirtyChunks > numChunks) {
-        throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, fieldsStream);
+        // Old versions of this format did not record numDirtyDocs. Since bulk
+        // merges are disabled on version increments anyway, we make no effort
+        // to get valid values of numDirtyChunks and numDirtyDocs.
+        numDirtyChunks = numDirtyDocs = -1;
       }
 
       if (metaIn != null) {
@@ -399,8 +397,8 @@
     // whether the block has been sliced, this happens for large documents
     private boolean sliced;
 
-    private int[] offsets = IntsRef.EMPTY_INTS;
-    private int[] numStoredFields = IntsRef.EMPTY_INTS;
+    private long[] offsets = LongsRef.EMPTY_LONGS;
+    private long[] numStoredFields = LongsRef.EMPTY_LONGS;
 
     // the start pointer at which you can read the compressed documents
     private long startPointer;
@@ -469,9 +467,11 @@
         } else if (bitsPerStoredFields > 31) {
           throw new CorruptIndexException("bitsPerStoredFields=" + bitsPerStoredFields, fieldsStream);
         } else {
-          final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerStoredFields, 1);
-          for (int i = 0; i < chunkDocs; ++i) {
-            numStoredFields[i] = (int) it.next();
+          final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerStoredFields, 1024);
+          for (int i = 0; i < chunkDocs; ) {
+            final LongsRef next = it.next(Integer.MAX_VALUE);
+            System.arraycopy(next.longs, next.offset, numStoredFields, i, next.length);
+            i += next.length;
           }
         }
 
@@ -486,9 +486,11 @@
         } else if (bitsPerLength > 31) {
           throw new CorruptIndexException("bitsPerLength=" + bitsPerLength, fieldsStream);
         } else {
-          final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerLength, 1);
-          for (int i = 0; i < chunkDocs; ++i) {
-            offsets[i + 1] = (int) it.next();
+          final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerLength, 1024);
+          for (int i = 0; i < chunkDocs; ) {
+            final LongsRef next = it.next(Integer.MAX_VALUE);
+            System.arraycopy(next.longs, next.offset, offsets, i + 1, next.length);
+            i += next.length;
           }
           for (int i = 0; i < chunkDocs; ++i) {
             offsets[i + 1] += offsets[i];
@@ -497,8 +499,8 @@
 
         // Additional validation: only the empty document has a serialized length of 0
         for (int i = 0; i < chunkDocs; ++i) {
-          final int len = offsets[i + 1] - offsets[i];
-          final int storedFields = numStoredFields[i];
+          final long len = offsets[i + 1] - offsets[i];
+          final long storedFields = numStoredFields[i];
           if ((len == 0) != (storedFields == 0)) {
             throw new CorruptIndexException("length=" + len + ", numStoredFields=" + storedFields, fieldsStream);
           }
@@ -509,7 +511,7 @@
       startPointer = fieldsStream.getFilePointer();
 
       if (merging) {
-        final int totalLength = offsets[chunkDocs];
+        final int totalLength = Math.toIntExact(offsets[chunkDocs]);
         // decompress eagerly
         if (sliced) {
           bytes.offset = bytes.length = 0;
@@ -540,10 +542,10 @@
       }
 
       final int index = docID - docBase;
-      final int offset = offsets[index];
-      final int length = offsets[index+1] - offset;
-      final int totalLength = offsets[chunkDocs];
-      final int numStoredFields = this.numStoredFields[index];
+      final int offset = Math.toIntExact(offsets[index]);
+      final int length = Math.toIntExact(offsets[index+1]) - offset;
+      final int totalLength = Math.toIntExact(offsets[chunkDocs]);
+      final int numStoredFields = Math.toIntExact(this.numStoredFields[index]);
 
       final BytesRef bytes;
       if (merging) {
@@ -686,14 +688,26 @@
     return chunkSize;
   }
   
-  long getNumChunks() {
-    return numChunks;
+  long getNumDirtyDocs() {
+    if (version != VERSION_CURRENT) {
+      throw new IllegalStateException("getNumDirtyDocs should only ever get called when the reader is on the current version");
+    }
+    assert numDirtyDocs >= 0;
+    return numDirtyDocs;
   }
   
   long getNumDirtyChunks() {
+    if (version != VERSION_CURRENT) {
+      throw new IllegalStateException("getNumDirtyChunks should only ever get called when the reader is on the current version");
+    }
+    assert numDirtyChunks >= 0;
     return numDirtyChunks;
   }
 
+  int getNumDocs() {
+    return numDocs;
+  }
+
   int getPackedIntsVersion() {
     return packedIntsVersion;
   }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
index 27fc3af..dfd15f0 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsWriter.java
@@ -95,8 +95,8 @@
   private int docBase; // doc ID at the beginning of the chunk
   private int numBufferedDocs; // docBase + numBufferedDocs == current doc ID
   
-  private long numChunks; // number of compressed blocks written
   private long numDirtyChunks; // number of incomplete compressed blocks written
+  private long numDirtyDocs; // cumulative number of missing docs in incomplete chunks
 
   /** Sole constructor. */
   CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, String segmentSuffix, IOContext context,
@@ -252,7 +252,6 @@
     docBase += numBufferedDocs;
     numBufferedDocs = 0;
     bufferedDocs.reset();
-    numChunks++;
   }
   
   @Override
@@ -468,8 +467,10 @@
   @Override
   public void finish(FieldInfos fis, int numDocs) throws IOException {
     if (numBufferedDocs > 0) {
-      flush();
       numDirtyChunks++; // incomplete: we had to force this flush
+      final long expectedChunkDocs = Math.min(maxDocsPerChunk, (long) ((double) chunkSize / bufferedDocs.size() * numBufferedDocs));
+      numDirtyDocs += expectedChunkDocs - numBufferedDocs;
+      flush();
     } else {
       assert bufferedDocs.size() == 0;
     }
@@ -477,8 +478,8 @@
       throw new RuntimeException("Wrote " + docBase + " docs, finish called with numDocs=" + numDocs);
     }
     indexWriter.finish(numDocs, fieldsStream.getFilePointer(), metaStream);
-    metaStream.writeVLong(numChunks);
     metaStream.writeVLong(numDirtyChunks);
+    metaStream.writeVLong(numDirtyDocs);
     CodecUtil.writeFooter(metaStream);
     CodecUtil.writeFooter(fieldsStream);
     assert bufferedDocs.size() == 0;
@@ -632,8 +633,8 @@
         }
         
         // since we bulk merged all chunks, we inherit any dirty ones from this segment.
-        numChunks += matchingFieldsReader.getNumChunks();
         numDirtyChunks += matchingFieldsReader.getNumDirtyChunks();
+        numDirtyDocs += matchingFieldsReader.getNumDirtyDocs();
       } else {
         // optimized merge, we copy serialized (but decompressed) bytes directly
         // even on simple docs (1 stored field), it seems to help by about 20%
@@ -669,7 +670,7 @@
   boolean tooDirty(CompressingStoredFieldsReader candidate) {
     // more than 1% dirty, or more than hard limit of 1024 dirty chunks
     return candidate.getNumDirtyChunks() > 1024 || 
-           candidate.getNumDirtyChunks() * 100 > candidate.getNumChunks();
+           candidate.getNumDirtyDocs() * 100 > candidate.getNumDocs();
   }
 
   private static class CompressingStoredFieldsMergeSub extends DocIDMerger.Sub {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index d3bdc06..0d555a5 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -85,8 +85,8 @@
   private final int numDocs;
   private boolean closed;
   private final BlockPackedReaderIterator reader;
-  private final long numChunks; // number of compressed blocks written
   private final long numDirtyChunks; // number of incomplete compressed blocks written
+  private final long numDirtyDocs; // cumulative number of missing docs in incomplete chunks
   private final long maxPointer; // end of the data section
 
   // used by clone
@@ -101,8 +101,8 @@
     this.numDocs = reader.numDocs;
     this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, PACKED_BLOCK_SIZE, 0);
     this.version = reader.version;
-    this.numChunks = reader.numChunks;
     this.numDirtyChunks = reader.numDirtyChunks;
+    this.numDirtyDocs = reader.numDirtyDocs;
     this.maxPointer = reader.maxPointer;
     this.closed = false;
   }
@@ -178,15 +178,13 @@
       this.maxPointer = maxPointer;
 
       if (version >= VERSION_META) {
-        numChunks = metaIn.readVLong();
         numDirtyChunks = metaIn.readVLong();
+        numDirtyDocs = metaIn.readVLong();
       } else {
-        vectorsStream.seek(maxPointer);
-        numChunks = vectorsStream.readVLong();
-        numDirtyChunks = vectorsStream.readVLong();
-      }
-      if (numDirtyChunks > numChunks) {
-        throw new CorruptIndexException("invalid chunk counts: dirty=" + numDirtyChunks + ", total=" + numChunks, vectorsStream);
+        // Old versions of this format did not record numDirtyDocs. Since bulk
+        // merges are disabled on version increments anyway, we make no effort
+        // to get valid values of numDirtyChunks and numDirtyDocs.
+        numDirtyChunks = numDirtyDocs = -1;
       }
 
       decompressor = compressionMode.newDecompressor();
@@ -240,14 +238,26 @@
     return maxPointer;
   }
   
-  long getNumChunks() {
-    return numChunks;
+  long getNumDirtyDocs() {
+    if (version != VERSION_CURRENT) {
+      throw new IllegalStateException("getNumDirtyDocs should only ever get called when the reader is on the current version");
+    }
+    assert numDirtyDocs >= 0;
+    return numDirtyDocs;
   }
   
   long getNumDirtyChunks() {
+    if (version != VERSION_CURRENT) {
+      throw new IllegalStateException("getNumDirtyChunks should only ever get called when the reader is on the current version");
+    }
+    assert numDirtyChunks >= 0;
     return numDirtyChunks;
   }
 
+  int getNumDocs() {
+    return numDocs;
+  }
+
   /**
    * @throws AlreadyClosedException if this TermVectorsReader is closed
    */
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
index 1232c46..4c348cd 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
@@ -87,9 +87,9 @@
   private final CompressionMode compressionMode;
   private final Compressor compressor;
   private final int chunkSize;
-  
-  private long numChunks; // number of compressed blocks written
+
   private long numDirtyChunks; // number of incomplete compressed blocks written
+  private long numDirtyDocs; // cumulative number of missing docs in incomplete chunks
 
   /** a pending doc */
   private class DocData {
@@ -376,7 +376,6 @@
     curDoc = null;
     curField = null;
     termSuffixes.reset();
-    numChunks++;
   }
 
   private int flushNumFields(int chunkDocs) throws IOException {
@@ -650,15 +649,17 @@
   @Override
   public void finish(FieldInfos fis, int numDocs) throws IOException {
     if (!pendingDocs.isEmpty()) {
-      flush();
       numDirtyChunks++; // incomplete: we had to force this flush
+      final long expectedChunkDocs = Math.min(MAX_DOCUMENTS_PER_CHUNK, (long) ((double) chunkSize / termSuffixes.size() * pendingDocs.size()));
+      numDirtyDocs += expectedChunkDocs - pendingDocs.size();
+      flush();
     }
     if (numDocs != this.numDocs) {
       throw new RuntimeException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
     }
     indexWriter.finish(numDocs, vectorsStream.getFilePointer(), metaStream);
-    metaStream.writeVLong(numChunks);
     metaStream.writeVLong(numDirtyChunks);
+    metaStream.writeVLong(numDirtyDocs);
     CodecUtil.writeFooter(metaStream);
     CodecUtil.writeFooter(vectorsStream);
   }
@@ -822,8 +823,8 @@
         }
         
         // since we bulk merged all chunks, we inherit any dirty ones from this segment.
-        numChunks += matchingVectorsReader.getNumChunks();
         numDirtyChunks += matchingVectorsReader.getNumDirtyChunks();
+        numDirtyDocs += matchingVectorsReader.getNumDirtyDocs();
       } else {        
         // naive merge...
         if (vectorsReader != null) {
@@ -858,7 +859,7 @@
   boolean tooDirty(CompressingTermVectorsReader candidate) {
     // more than 1% dirty, or more than hard limit of 1024 dirty chunks
     return candidate.getNumDirtyChunks() > 1024 || 
-           candidate.getNumDirtyChunks() * 100 > candidate.getNumChunks();
+           candidate.getNumDirtyDocs() * 100 > candidate.getNumDocs();
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/BugfixDeflater_JDK8252739.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/BugfixDeflater_JDK8252739.java
index 582715e..7125123 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/BugfixDeflater_JDK8252739.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/BugfixDeflater_JDK8252739.java
@@ -21,6 +21,7 @@
 import java.util.zip.Deflater;
 import java.util.zip.Inflater;
 
+import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.SuppressForbidden;
 
 /**
@@ -39,16 +40,14 @@
    * on a {@code Deflater}.
    * */
   @SuppressForbidden(reason = "Works around bug, so it must call forbidden method")
-  public static BugfixDeflater_JDK8252739 createBugfix(Deflater deflater, int dictLength) {
-    if (dictLength < 0) {
-      throw new IllegalArgumentException("dictLength must be >= 0");
-    }
+  public static BugfixDeflater_JDK8252739 createBugfix(Deflater deflater) {
     if (IS_BUGGY_JDK) {
-      final byte[] dictBytesScratch = new byte[dictLength];
+      final BytesRefBuilder dictBytesScratch = new BytesRefBuilder();
       return (dictBytes, off, len) -> {
         if (off > 0) {
-          System.arraycopy(dictBytes, off, dictBytesScratch, 0, len);
-          deflater.setDictionary(dictBytesScratch, 0, len);
+          dictBytesScratch.grow(len);
+          System.arraycopy(dictBytes, off, dictBytesScratch.bytes(), 0, len);
+          deflater.setDictionary(dictBytesScratch.bytes(), 0, len);
         } else {
           deflater.setDictionary(dictBytes, off, len);
         }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/DeflateWithPresetDictCompressionMode.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/DeflateWithPresetDictCompressionMode.java
index 4269740..5b54f7d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/DeflateWithPresetDictCompressionMode.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/DeflateWithPresetDictCompressionMode.java
@@ -39,20 +39,20 @@
  */
 public final class DeflateWithPresetDictCompressionMode extends CompressionMode {
 
-  private final int dictLength, subBlockLength;
+  // Shoot for 10 sub blocks
+  private static final int NUM_SUB_BLOCKS = 10;
+  // And a dictionary whose size is about 6x smaller than sub blocks
+  private static final int DICT_SIZE_FACTOR = 6;
 
   /** Sole constructor. */
-  public DeflateWithPresetDictCompressionMode(int dictLength, int subBlockLength) {
-    this.dictLength = dictLength;
-    this.subBlockLength = subBlockLength;
-  }
+  public DeflateWithPresetDictCompressionMode() {}
 
   @Override
   public Compressor newCompressor() {
     // notes:
     // 3 is the highest level that doesn't have lazy match evaluation
     // 6 is the default, higher than that is just a waste of cpu
-    return new DeflateWithPresetDictCompressor(6, dictLength, subBlockLength);
+    return new DeflateWithPresetDictCompressor(6);
   }
 
   @Override
@@ -155,18 +155,15 @@
 
   private static class DeflateWithPresetDictCompressor extends Compressor {
 
-    private final int dictLength, blockLength;
     final Deflater compressor;
     final BugfixDeflater_JDK8252739 deflaterBugfix;
     byte[] compressed;
     boolean closed;
 
-    DeflateWithPresetDictCompressor(int level, int dictLength, int blockLength) {
+    DeflateWithPresetDictCompressor(int level) {
       compressor = new Deflater(level, true);
-      deflaterBugfix = BugfixDeflater_JDK8252739.createBugfix(compressor, dictLength);
+      deflaterBugfix = BugfixDeflater_JDK8252739.createBugfix(compressor);
       compressed = new byte[64];
-      this.dictLength = dictLength;
-      this.blockLength = blockLength;
     }
 
     private void doCompress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
@@ -198,7 +195,8 @@
 
     @Override
     public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
-      final int dictLength = Math.min(this.dictLength, len);
+      final int dictLength = len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR);
+      final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
       out.writeVInt(dictLength);
       out.writeVInt(blockLength);
       final int end = off + len;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/LZ4WithPresetDictCompressionMode.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/LZ4WithPresetDictCompressionMode.java
index 0d10cfd..15b961d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/LZ4WithPresetDictCompressionMode.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/LZ4WithPresetDictCompressionMode.java
@@ -36,17 +36,17 @@
  */
 public final class LZ4WithPresetDictCompressionMode extends CompressionMode {
 
-  private final int dictLength, subBlockLength;
+  // Shoot for 10 sub blocks
+  private static final int NUM_SUB_BLOCKS = 10;
+  // And a dictionary whose size is about 16x smaller than sub blocks
+  private static final int DICT_SIZE_FACTOR = 16;
 
   /** Sole constructor. */
-  public LZ4WithPresetDictCompressionMode(int dictLength, int subBlockLength) {
-    this.dictLength = dictLength;
-    this.subBlockLength = subBlockLength;
-  }
+  public LZ4WithPresetDictCompressionMode() {}
 
   @Override
   public Compressor newCompressor() {
-    return new LZ4WithPresetDictCompressor(dictLength, subBlockLength);
+    return new LZ4WithPresetDictCompressor();
   }
 
   @Override
@@ -147,18 +147,14 @@
 
   private static class LZ4WithPresetDictCompressor extends Compressor {
 
-    final int dictLength;
-    final int blockLength;
     final ByteBuffersDataOutput compressed;
     final LZ4.FastCompressionHashTable hashTable;
-    final byte[] buffer;
+    byte[] buffer;
 
-    LZ4WithPresetDictCompressor(int dictLength, int blockLength) {
+    LZ4WithPresetDictCompressor() {
       compressed = ByteBuffersDataOutput.newResettableInstance();
       hashTable = new LZ4.FastCompressionHashTable();
-      this.dictLength = dictLength;
-      this.blockLength = blockLength;
-      buffer = new byte[dictLength + blockLength];
+      buffer = BytesRef.EMPTY_BYTES;
     }
 
     private void doCompress(byte[] bytes, int dictLen, int len, DataOutput out) throws IOException {
@@ -170,7 +166,9 @@
 
     @Override
     public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
-      final int dictLength = Math.min(this.dictLength, len);
+      final int dictLength = len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR);
+      final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
+      buffer = ArrayUtil.grow(buffer, dictLength + blockLength);
       out.writeVInt(dictLength);
       out.writeVInt(blockLength);
       final int end = off + len;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/Lucene87StoredFieldsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/Lucene87StoredFieldsFormat.java
index 93ae5da..56648c7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene87/Lucene87StoredFieldsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene87/Lucene87StoredFieldsFormat.java
@@ -144,42 +144,23 @@
   StoredFieldsFormat impl(Mode mode) {
     switch (mode) {
       case BEST_SPEED:
-        return new CompressingStoredFieldsFormat("Lucene87StoredFieldsFastData", BEST_SPEED_MODE, BEST_SPEED_BLOCK_LENGTH, 512, 10);
+        return new CompressingStoredFieldsFormat("Lucene87StoredFieldsFastData", BEST_SPEED_MODE, BEST_SPEED_BLOCK_LENGTH, 1024, 10);
       case BEST_COMPRESSION:
-        return new CompressingStoredFieldsFormat("Lucene87StoredFieldsHighData", BEST_COMPRESSION_MODE, BEST_COMPRESSION_BLOCK_LENGTH, 512, 10);
+        return new CompressingStoredFieldsFormat("Lucene87StoredFieldsHighData", BEST_COMPRESSION_MODE, BEST_COMPRESSION_BLOCK_LENGTH, 4096, 10);
       default: throw new AssertionError();
     }
   }
 
-  // 8kB seems to be a good trade-off between higher compression rates by not
-  // having to fully bootstrap a dictionary, and indexing rate by not spending
-  // too much CPU initializing data-structures to find strings in this preset
-  // dictionary.
-  private static final int BEST_COMPRESSION_DICT_LENGTH = 8 * 1024;
-  // 48kB seems like a nice trade-off because it's small enough to keep
-  // retrieval fast, yet sub blocks can find strings in a window of 26kB of
-  // data on average (the window grows from 8kB to 32kB in the first 24kB, and
-  // then DEFLATE can use 32kB for the last 24kB) which is close enough to the
-  // maximum window length of DEFLATE of 32kB.
-  private static final int BEST_COMPRESSION_SUB_BLOCK_LENGTH = 48 * 1024;
-  // We shoot for 10 sub blocks per block, which should hopefully amortize the
-  // space overhead of having the first 8kB compressed without any preset dict,
-  // and then remove 8kB in order to avoid creating a tiny 11th sub block if
-  // documents are small.
-  private static final int BEST_COMPRESSION_BLOCK_LENGTH = BEST_COMPRESSION_DICT_LENGTH + 10 * BEST_COMPRESSION_SUB_BLOCK_LENGTH - 8 * 1024;
+  // Shoot for 10 sub blocks of 48kB each.
+  private static final int BEST_COMPRESSION_BLOCK_LENGTH = 10 * 48 * 1024;
 
   /** Compression mode for {@link Mode#BEST_COMPRESSION} */
-  public static final CompressionMode BEST_COMPRESSION_MODE = new DeflateWithPresetDictCompressionMode(BEST_COMPRESSION_DICT_LENGTH, BEST_COMPRESSION_SUB_BLOCK_LENGTH);
+  public static final CompressionMode BEST_COMPRESSION_MODE = new DeflateWithPresetDictCompressionMode();
 
-  // We need to re-initialize the hash table for every sub block with the
-  // content of the dictionary, so we keep it small to not hurt indexing.
-  private static final int BEST_SPEED_DICT_LENGTH = 4 * 1024;
-  // 60kB so that dict_length + block_length == max window size
-  private static final int BEST_SPEED_SUB_BLOCK_LENGTH = 60 * 1024;
-  // shoot for 10 sub blocks in addition to the dictionary
-  private static final int BEST_SPEED_BLOCK_LENGTH = BEST_SPEED_DICT_LENGTH + 10 * BEST_SPEED_SUB_BLOCK_LENGTH - 8 * 1024;
+  // Shoot for 10 sub blocks of 60kB each.
+  private static final int BEST_SPEED_BLOCK_LENGTH = 10 * 60 * 1024;
 
   /** Compression mode for {@link Mode#BEST_SPEED} */
-  public static final CompressionMode BEST_SPEED_MODE = new LZ4WithPresetDictCompressionMode(BEST_SPEED_DICT_LENGTH, BEST_SPEED_SUB_BLOCK_LENGTH);
+  public static final CompressionMode BEST_SPEED_MODE = new LZ4WithPresetDictCompressionMode();
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
index 5753ba4..c0506a8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
@@ -19,21 +19,65 @@
 
 import java.io.IOException;
 import java.io.Reader;
-import java.util.Map;
 import java.util.Objects;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.StoredFieldsWriter;
+import org.apache.lucene.codecs.compressing.CompressingStoredFieldsFormat;
+import org.apache.lucene.codecs.compressing.CompressionMode;
+import org.apache.lucene.codecs.compressing.Compressor;
+import org.apache.lucene.codecs.compressing.Decompressor;
 import org.apache.lucene.document.StoredField;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
 final class SortingStoredFieldsConsumer extends StoredFieldsConsumer {
+
+  static final CompressionMode NO_COMPRESSION = new CompressionMode() {
+    @Override
+    public Compressor newCompressor() {
+      return new Compressor() {
+        @Override
+        public void close() throws IOException {}
+
+        @Override
+        public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
+          out.writeBytes(bytes, off, len);
+        }
+      };
+    }
+
+    @Override
+    public Decompressor newDecompressor() {
+      return new Decompressor() {
+        @Override
+        public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes)
+            throws IOException {
+          bytes.bytes = ArrayUtil.grow(bytes.bytes, length);
+          in.skipBytes(offset);
+          in.readBytes(bytes.bytes, 0, length);
+          bytes.offset = 0;
+          bytes.length = length;
+        }
+
+        @Override
+        public Decompressor clone() {
+          return this;
+        }
+      };
+    }
+  };
+  private static final StoredFieldsFormat TEMP_STORED_FIELDS_FORMAT = new CompressingStoredFieldsFormat(
+      "TempStoredFields", NO_COMPRESSION, 128*1024, 1, 10);
   TrackingTmpOutputDirectoryWrapper tmpDirectory;
 
   SortingStoredFieldsConsumer(Codec codec, Directory directory, SegmentInfo info) {
@@ -44,21 +88,14 @@
   protected void initStoredFieldsWriter() throws IOException {
     if (writer == null) {
       this.tmpDirectory = new TrackingTmpOutputDirectoryWrapper(directory);
-      this.writer = codec.storedFieldsFormat().fieldsWriter(tmpDirectory, info, IOContext.DEFAULT);
+      this.writer = TEMP_STORED_FIELDS_FORMAT.fieldsWriter(tmpDirectory, info, IOContext.DEFAULT);
     }
   }
 
   @Override
   void flush(SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
     super.flush(state, sortMap);
-    if (sortMap == null) {
-      // we're lucky the index is already sorted, just rename the temporary file and return
-      for (Map.Entry<String, String> entry : tmpDirectory.getTemporaryFiles().entrySet()) {
-        tmpDirectory.rename(entry.getValue(), entry.getKey());
-      }
-      return;
-    }
-    StoredFieldsReader reader = codec.storedFieldsFormat()
+    StoredFieldsReader reader = TEMP_STORED_FIELDS_FORMAT
         .fieldsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT);
     // Don't pull a merge instance, since merge instances optimize for
     // sequential access while we consume stored fields in random order here.
@@ -69,7 +106,7 @@
       CopyVisitor visitor = new CopyVisitor(sortWriter);
       for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
         sortWriter.startDocument();
-        reader.visitDocument(sortMap.newToOld(docID), visitor);
+        reader.visitDocument(sortMap == null ? docID : sortMap.newToOld(docID), visitor);
         sortWriter.finishDocument();
       }
       sortWriter.finish(state.fieldInfos, state.segmentInfo.maxDoc());
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
index 5162f39..0499081 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
@@ -23,8 +23,10 @@
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.NormsProducer;
+import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.codecs.TermVectorsWriter;
+import org.apache.lucene.codecs.compressing.CompressingTermVectorsFormat;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
@@ -35,6 +37,9 @@
 import org.apache.lucene.util.IntBlockPool;
 
 final class SortingTermVectorsConsumer extends TermVectorsConsumer {
+
+  private static final TermVectorsFormat TEMP_TERM_VECTORS_FORMAT = new CompressingTermVectorsFormat(
+      "TempTermVectors", "", SortingStoredFieldsConsumer.NO_COMPRESSION, 8*1024, 10);
   TrackingTmpOutputDirectoryWrapper tmpDirectory;
 
   SortingTermVectorsConsumer(final IntBlockPool.Allocator intBlockAllocator, final ByteBlockPool.Allocator byteBlockAllocator, Directory directory, SegmentInfo info, Codec codec) {
@@ -45,22 +50,17 @@
   void flush(Map<String, TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
     super.flush(fieldsToFlush, state, sortMap, norms);
     if (tmpDirectory != null) {
-      if (sortMap == null) {
-        // we're lucky the index is already sorted, just rename the temporary file and return
-        for (Map.Entry<String, String> entry : tmpDirectory.getTemporaryFiles().entrySet()) {
-          tmpDirectory.rename(entry.getValue(), entry.getKey());
-        }
-        return;
-      }
-      TermVectorsReader reader = codec.termVectorsFormat()
+      TermVectorsReader reader = TEMP_TERM_VECTORS_FORMAT
           .vectorsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT);
-      TermVectorsReader mergeReader = reader.getMergeInstance();
+      // Don't pull a merge instance, since merge instances optimize for
+      // sequential access while term vectors will likely be accessed in random
+      // order here.
       TermVectorsWriter writer = codec.termVectorsFormat()
           .vectorsWriter(state.directory, state.segmentInfo, IOContext.DEFAULT);
       try {
         reader.checkIntegrity();
         for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
-          Fields vectors = mergeReader.get(sortMap.newToOld(docID));
+          Fields vectors = reader.get(sortMap == null ? docID : sortMap.newToOld(docID));
           writeTermVectors(writer, vectors, state.fieldInfos);
         }
         writer.finish(state.fieldInfos, state.segmentInfo.maxDoc());
@@ -77,7 +77,7 @@
     if (writer == null) {
       IOContext context = new IOContext(new FlushInfo(lastDocID, bytesUsed.get()));
       tmpDirectory = new TrackingTmpOutputDirectoryWrapper(directory);
-      writer = codec.termVectorsFormat().vectorsWriter(tmpDirectory, info, context);
+      writer = TEMP_TERM_VECTORS_FORMAT.vectorsWriter(tmpDirectory, info, context);
       lastDocID = 0;
     }
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java
index 443117f..81567ff 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 7.0 */
 /* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.classic;
 
@@ -111,5 +111,10 @@
    */
   void Done();
 
+
+  void setTabSize(int i);
+  int getTabSize();
+  boolean getTrackLineColumn();
+  void setTrackLineColumn(boolean trackLineColumn);
 }
-/* JavaCC - OriginalChecksum=30b94cad7b10d0d81e3a59a1083939d0 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java
index 9439bc0..e01b90a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/FastCharStream.java
@@ -138,4 +138,24 @@
   public final int getBeginLine() {
     return 1;
   }
+
+  @Override
+  public void setTabSize(int i) {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public int getTabSize() {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public boolean getTrackLineColumn() {
+    return false;
+  }
+
+  @Override
+  public void setTrackLineColumn(boolean trackLineColumn) {
+    throw new RuntimeException("Line/Column tracking not implemented.");
+  }
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java
index 3c02be3..54a2d35 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 5.0 */
-/* JavaCCOptions:KEEP_LINE_COL=null */
+/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */
+/* JavaCCOptions:KEEP_LINE_COLUMN=true */
 package org.apache.lucene.queryparser.classic;
 
 /**
@@ -21,6 +21,11 @@
   private static final long serialVersionUID = 1L;
 
   /**
+   * The end of line string for this machine.
+   */
+  protected static String EOL = System.getProperty("line.separator", "\n");
+
+  /**
    * This constructor is used by the method "generateParseException"
    * in the generated parser.  Calling this constructor generates
    * a new object of this type with the fields "currentToken",
@@ -88,7 +93,7 @@
   private static String initialise(Token currentToken,
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
-    String eol = System.getProperty("line.separator", "\n");
+
     StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
@@ -101,7 +106,7 @@
       if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
         expected.append("...");
       }
-      expected.append(eol).append("    ");
+      expected.append(EOL).append("    ");
     }
     String retval = "Encountered \"";
     Token tok = currentToken.next;
@@ -118,20 +123,23 @@
       tok = tok.next;
     }
     retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
-    retval += "." + eol;
-    if (expectedTokenSequences.length == 1) {
-      retval += "Was expecting:" + eol + "    ";
+    retval += "." + EOL;
+    
+    
+    if (expectedTokenSequences.length == 0) {
+        // Nothing to add here
     } else {
-      retval += "Was expecting one of:" + eol + "    ";
+        if (expectedTokenSequences.length == 1) {
+          retval += "Was expecting:" + EOL + "    ";
+        } else {
+          retval += "Was expecting one of:" + EOL + "    ";
+        }
+        retval += expected.toString();
     }
-    retval += expected.toString();
+    
     return retval;
   }
 
-  /**
-   * The end of line string for this machine.
-   */
-  protected String eol = System.getProperty("line.separator", "\n");
 
   /**
    * Used to convert raw characters to their escaped version
@@ -144,8 +152,6 @@
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
         {
-           case 0 :
-              continue;
            case '\b':
               retval.append("\\b");
               continue;
@@ -184,4 +190,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=b187d97d5bb75c3fc63d642c1c26ac6e (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
index dcdfa6e..f7da7e0 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
@@ -1,3 +1,4 @@
+/* QueryParser.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParser.java */
 package org.apache.lucene.queryparser.classic;
 
@@ -148,84 +149,88 @@
 
 // *   Query  ::= ( Clause )*
 // *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
-  final public int Conjunction() throws ParseException {
-  int ret = CONJ_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+  final public 
+int Conjunction() throws ParseException {int ret = CONJ_NONE;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case AND:
-    case OR:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
+    case OR:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case AND:{
         jj_consume_token(AND);
-            ret = CONJ_AND;
+ret = CONJ_AND;
         break;
-      case OR:
+        }
+      case OR:{
         jj_consume_token(OR);
-              ret = CONJ_OR;
+ret = CONJ_OR;
         break;
+        }
       default:
         jj_la1[0] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       break;
+      }
     default:
       jj_la1[1] = jj_gen;
       ;
     }
-    {if (true) return ret;}
+{if ("" != null) return ret;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public int Modifiers() throws ParseException {
-  int ret = MOD_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+  final public int Modifiers() throws ParseException {int ret = MOD_NONE;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case NOT:
     case PLUS:
-    case MINUS:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case PLUS:
+    case MINUS:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case PLUS:{
         jj_consume_token(PLUS);
-             ret = MOD_REQ;
+ret = MOD_REQ;
         break;
-      case MINUS:
+        }
+      case MINUS:{
         jj_consume_token(MINUS);
-                ret = MOD_NOT;
+ret = MOD_NOT;
         break;
-      case NOT:
+        }
+      case NOT:{
         jj_consume_token(NOT);
-              ret = MOD_NOT;
+ret = MOD_NOT;
         break;
+        }
       default:
         jj_la1[2] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       break;
+      }
     default:
       jj_la1[3] = jj_gen;
       ;
     }
-    {if (true) return ret;}
+{if ("" != null) return ret;}
     throw new Error("Missing return statement in function");
-  }
+}
 
 // This makes sure that there is no garbage after the query string
-  final public Query TopLevelQuery(String field) throws ParseException {
-  Query q;
+  final public Query TopLevelQuery(String field) throws ParseException {Query q;
     q = Query(field);
     jj_consume_token(0);
-    {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Query(String field) throws ParseException {
-  List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+  final public Query Query(String field) throws ParseException {List<BooleanClause> clauses = new ArrayList<BooleanClause>();
   Query q, firstQuery=null;
   int conj, mods;
     if (jj_2_1(2)) {
       firstQuery = MultiTerm(field, clauses);
     } else {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case NOT:
       case PLUS:
       case MINUS:
@@ -239,14 +244,15 @@
       case REGEXPTERM:
       case RANGEIN_START:
       case RANGEEX_START:
-      case NUMBER:
+      case NUMBER:{
         mods = Modifiers();
         q = Clause(field);
-        addClause(clauses, CONJ_NONE, mods, q);
+addClause(clauses, CONJ_NONE, mods, q);
         if (mods == MOD_NONE) {
           firstQuery = q;
         }
         break;
+        }
       default:
         jj_la1[4] = jj_gen;
         jj_consume_token(-1);
@@ -255,7 +261,7 @@
     }
     label_1:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case AND:
       case OR:
       case NOT:
@@ -271,9 +277,10 @@
       case REGEXPTERM:
       case RANGEIN_START:
       case RANGEEX_START:
-      case NUMBER:
+      case NUMBER:{
         ;
         break;
+        }
       default:
         jj_la1[5] = jj_gen;
         break label_1;
@@ -281,7 +288,7 @@
       if (jj_2_2(2)) {
         MultiTerm(field, clauses);
       } else {
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
         case AND:
         case OR:
         case NOT:
@@ -297,12 +304,13 @@
         case REGEXPTERM:
         case RANGEIN_START:
         case RANGEEX_START:
-        case NUMBER:
+        case NUMBER:{
           conj = Conjunction();
           mods = Modifiers();
           q = Clause(field);
-        addClause(clauses, conj, mods, q);
+addClause(clauses, conj, mods, q);
           break;
+          }
         default:
           jj_la1[6] = jj_gen;
           jj_consume_token(-1);
@@ -310,29 +318,30 @@
         }
       }
     }
-    if (clauses.size() == 1 && firstQuery != null) {
-      {if (true) return firstQuery;}
+if (clauses.size() == 1 && firstQuery != null) {
+      {if ("" != null) return firstQuery;}
     } else {
-      {if (true) return getBooleanQuery(clauses);}
+      {if ("" != null) return getBooleanQuery(clauses);}
     }
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Clause(String field) throws ParseException {
-  Query q;
+  final public Query Clause(String field) throws ParseException {Query q;
   Token fieldToken=null, boost=null;
     if (jj_2_3(2)) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case TERM:{
         fieldToken = jj_consume_token(TERM);
         jj_consume_token(COLON);
-                                 field=discardEscapeChar(fieldToken.image);
+field=discardEscapeChar(fieldToken.image);
         break;
-      case STAR:
+        }
+      case STAR:{
         jj_consume_token(STAR);
         jj_consume_token(COLON);
-                        field="*";
+field="*";
         break;
+        }
       default:
         jj_la1[7] = jj_gen;
         jj_consume_token(-1);
@@ -341,7 +350,7 @@
     } else {
       ;
     }
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case BAREOPER:
     case STAR:
     case QUOTED:
@@ -351,34 +360,36 @@
     case REGEXPTERM:
     case RANGEIN_START:
     case RANGEEX_START:
-    case NUMBER:
+    case NUMBER:{
       q = Term(field);
       break;
-    case LPAREN:
+      }
+    case LPAREN:{
       jj_consume_token(LPAREN);
       q = Query(field);
       jj_consume_token(RPAREN);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[8] = jj_gen;
         ;
       }
       break;
+      }
     default:
       jj_la1[9] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
-    {if (true) return handleBoost(q, boost);}
+{if ("" != null) return handleBoost(q, boost);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Term(String field) throws ParseException {
-  Token term, boost=null, fuzzySlop=null, goop1, goop2;
+  final public Query Term(String field) throws ParseException {Token term, boost=null, fuzzySlop=null, goop1, goop2;
   boolean prefix = false;
   boolean wildcard = false;
   boolean fuzzy = false;
@@ -386,157 +397,181 @@
   boolean startInc=false;
   boolean endInc=false;
   Query q;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case BAREOPER:
     case STAR:
     case TERM:
     case PREFIXTERM:
     case WILDTERM:
     case REGEXPTERM:
-    case NUMBER:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
+    case NUMBER:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case TERM:{
         term = jj_consume_token(TERM);
         break;
-      case STAR:
+        }
+      case STAR:{
         term = jj_consume_token(STAR);
-                      wildcard=true;
+wildcard=true;
         break;
-      case PREFIXTERM:
+        }
+      case PREFIXTERM:{
         term = jj_consume_token(PREFIXTERM);
-                            prefix=true;
+prefix=true;
         break;
-      case WILDTERM:
+        }
+      case WILDTERM:{
         term = jj_consume_token(WILDTERM);
-                          wildcard=true;
+wildcard=true;
         break;
-      case REGEXPTERM:
+        }
+      case REGEXPTERM:{
         term = jj_consume_token(REGEXPTERM);
-                            regexp=true;
+regexp=true;
         break;
-      case NUMBER:
+        }
+      case NUMBER:{
         term = jj_consume_token(NUMBER);
         break;
-      case BAREOPER:
+        }
+      case BAREOPER:{
         term = jj_consume_token(BAREOPER);
-                          term.image = term.image.substring(0,1);
+term.image = term.image.substring(0,1);
         break;
+        }
       default:
         jj_la1[10] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case CARAT:
-      case FUZZY_SLOP:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case CARAT:
+      case FUZZY_SLOP:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case CARAT:{
           jj_consume_token(CARAT);
           boost = jj_consume_token(NUMBER);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case FUZZY_SLOP:
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case FUZZY_SLOP:{
             fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                        fuzzy=true;
+fuzzy=true;
             break;
+            }
           default:
             jj_la1[11] = jj_gen;
             ;
           }
           break;
-        case FUZZY_SLOP:
+          }
+        case FUZZY_SLOP:{
           fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                 fuzzy=true;
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case CARAT:
+fuzzy=true;
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case CARAT:{
             jj_consume_token(CARAT);
             boost = jj_consume_token(NUMBER);
             break;
+            }
           default:
             jj_la1[12] = jj_gen;
             ;
           }
           break;
+          }
         default:
           jj_la1[13] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
         break;
+        }
       default:
         jj_la1[14] = jj_gen;
         ;
       }
-      q = handleBareTokenQuery(field, term, fuzzySlop, prefix, wildcard, fuzzy, regexp);
+q = handleBareTokenQuery(field, term, fuzzySlop, prefix, wildcard, fuzzy, regexp);
       break;
+      }
     case RANGEIN_START:
-    case RANGEEX_START:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_START:
+    case RANGEEX_START:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_START:{
         jj_consume_token(RANGEIN_START);
-                        startInc = true;
+startInc = true;
         break;
-      case RANGEEX_START:
+        }
+      case RANGEEX_START:{
         jj_consume_token(RANGEEX_START);
         break;
+        }
       default:
         jj_la1[15] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
         goop1 = jj_consume_token(RANGE_GOOP);
         break;
-      case RANGE_QUOTED:
+        }
+      case RANGE_QUOTED:{
         goop1 = jj_consume_token(RANGE_QUOTED);
         break;
-      case RANGE_TO:
+        }
+      case RANGE_TO:{
         goop1 = jj_consume_token(RANGE_TO);
         break;
+        }
       default:
         jj_la1[16] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       jj_consume_token(RANGE_TO);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
         goop2 = jj_consume_token(RANGE_GOOP);
         break;
-      case RANGE_QUOTED:
+        }
+      case RANGE_QUOTED:{
         goop2 = jj_consume_token(RANGE_QUOTED);
         break;
-      case RANGE_TO:
+        }
+      case RANGE_TO:{
         goop2 = jj_consume_token(RANGE_TO);
         break;
+        }
       default:
         jj_la1[17] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_END:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_END:{
         jj_consume_token(RANGEIN_END);
-                      endInc = true;
+endInc = true;
         break;
-      case RANGEEX_END:
+        }
+      case RANGEEX_END:{
         jj_consume_token(RANGEEX_END);
         break;
+        }
       default:
         jj_la1[18] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[19] = jj_gen;
         ;
       }
-      boolean startOpen=false;
+boolean startOpen=false;
       boolean endOpen=false;
       if (goop1.kind == RANGE_QUOTED) {
         goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -550,65 +585,71 @@
       }
       q = getRangeQuery(field, startOpen ? null : discardEscapeChar(goop1.image), endOpen ? null : discardEscapeChar(goop2.image), startInc, endInc);
       break;
-    case QUOTED:
+      }
+    case QUOTED:{
       term = jj_consume_token(QUOTED);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case CARAT:
-      case FUZZY_SLOP:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case CARAT:
+      case FUZZY_SLOP:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case CARAT:{
           jj_consume_token(CARAT);
           boost = jj_consume_token(NUMBER);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case FUZZY_SLOP:
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case FUZZY_SLOP:{
             fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                        fuzzy=true;
+fuzzy=true;
             break;
+            }
           default:
             jj_la1[20] = jj_gen;
             ;
           }
           break;
-        case FUZZY_SLOP:
+          }
+        case FUZZY_SLOP:{
           fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                 fuzzy=true;
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case CARAT:
+fuzzy=true;
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case CARAT:{
             jj_consume_token(CARAT);
             boost = jj_consume_token(NUMBER);
             break;
+            }
           default:
             jj_la1[21] = jj_gen;
             ;
           }
           break;
+          }
         default:
           jj_la1[22] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
         break;
+        }
       default:
         jj_la1[23] = jj_gen;
         ;
       }
-      q = handleQuotedTerm(field, term, fuzzySlop);
+q = handleQuotedTerm(field, term, fuzzySlop);
       break;
+      }
     default:
       jj_la1[24] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
-    {if (true) return handleBoost(q, boost);}
+{if ("" != null) return handleBoost(q, boost);}
     throw new Error("Missing return statement in function");
-  }
+}
 
 /** Returns the first query if splitOnWhitespace=true or otherwise the entire produced query */
-  final public Query MultiTerm(String field, List<BooleanClause> clauses) throws ParseException {
-  Token text, whitespace, followingText;
+  final public Query MultiTerm(String field, List<BooleanClause> clauses) throws ParseException {Token text, whitespace, followingText;
   Query firstQuery = null;
     text = jj_consume_token(TERM);
-    if (splitOnWhitespace) {
+if (splitOnWhitespace) {
       firstQuery = getFieldQuery(field, discardEscapeChar(text.image), false);
       addClause(clauses, CONJ_NONE, MOD_NONE, firstQuery);
     }
@@ -621,7 +662,7 @@
     label_2:
     while (true) {
       followingText = jj_consume_token(TERM);
-      if (splitOnWhitespace) {
+if (splitOnWhitespace) {
         Query q = getFieldQuery(field, discardEscapeChar(followingText.image), false);
         addClause(clauses, CONJ_NONE, MOD_NONE, q);
       } else { // build up the text to send to analysis
@@ -633,36 +674,40 @@
         break label_2;
       }
     }
-    if (splitOnWhitespace == false) {
+if (splitOnWhitespace == false) {
       firstQuery = getFieldQuery(field, discardEscapeChar(text.image), false);
       addMultiTermClauses(clauses, firstQuery);
     }
-    {if (true) return firstQuery;}
+    {if ("" != null) return firstQuery;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  private boolean jj_2_1(int xla) {
+  private boolean jj_2_1(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_1(); }
+    try { return (!jj_3_1()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(0, xla); }
   }
 
-  private boolean jj_2_2(int xla) {
+  private boolean jj_2_2(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_2(); }
+    try { return (!jj_3_2()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(1, xla); }
   }
 
-  private boolean jj_2_3(int xla) {
+  private boolean jj_2_3(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_3(); }
+    try { return (!jj_3_3()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(2, xla); }
   }
 
-  private boolean jj_3R_3() {
+  private boolean jj_3R_3()
+ {
     if (jj_scan_token(TERM)) return true;
     jj_lookingAhead = true;
     jj_semLA = getToken(1).kind == TERM && allowedPostMultiTerm(getToken(2).kind);
@@ -677,38 +722,45 @@
     return false;
   }
 
-  private boolean jj_3R_6() {
+  private boolean jj_3R_6()
+ {
     return false;
   }
 
-  private boolean jj_3R_5() {
+  private boolean jj_3R_5()
+ {
     if (jj_scan_token(STAR)) return true;
     if (jj_scan_token(COLON)) return true;
     return false;
   }
 
-  private boolean jj_3R_4() {
+  private boolean jj_3R_4()
+ {
     if (jj_scan_token(TERM)) return true;
     if (jj_scan_token(COLON)) return true;
     return false;
   }
 
-  private boolean jj_3_2() {
+  private boolean jj_3_2()
+ {
     if (jj_3R_3()) return true;
     return false;
   }
 
-  private boolean jj_3_1() {
+  private boolean jj_3_1()
+ {
     if (jj_3R_3()) return true;
     return false;
   }
 
-  private boolean jj_3R_7() {
+  private boolean jj_3R_7()
+ {
     if (jj_scan_token(TERM)) return true;
     return false;
   }
 
-  private boolean jj_3_3() {
+  private boolean jj_3_3()
+ {
     Token xsp;
     xsp = jj_scanpos;
     if (jj_3R_4()) {
@@ -735,132 +787,133 @@
   static private int[] jj_la1_0;
   static private int[] jj_la1_1;
   static {
-      jj_la1_init_0();
-      jj_la1_init_1();
-   }
-   private static void jj_la1_init_0() {
-      jj_la1_0 = new int[] {0x300,0x300,0x1c00,0x1c00,0xfda7c00,0xfda7f00,0xfda7f00,0x120000,0x40000,0xfda6000,0x9d22000,0x200000,0x40000,0x240000,0x240000,0x6000000,0x90000000,0x90000000,0x60000000,0x40000,0x200000,0x40000,0x240000,0x240000,0xfda2000,};
-   }
-   private static void jj_la1_init_1() {
-      jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,};
-   }
+       jj_la1_init_0();
+       jj_la1_init_1();
+    }
+    private static void jj_la1_init_0() {
+       jj_la1_0 = new int[] {0x300,0x300,0x1c00,0x1c00,0xfda7c00,0xfda7f00,0xfda7f00,0x120000,0x40000,0xfda6000,0x9d22000,0x200000,0x40000,0x240000,0x240000,0x6000000,0x90000000,0x90000000,0x60000000,0x40000,0x200000,0x40000,0x240000,0x240000,0xfda2000,};
+    }
+    private static void jj_la1_init_1() {
+       jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1,0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,};
+    }
   final private JJCalls[] jj_2_rtns = new JJCalls[3];
   private boolean jj_rescan = false;
   private int jj_gc = 0;
 
   /** Constructor with user supplied CharStream. */
   protected QueryParser(CharStream stream) {
-    token_source = new QueryParserTokenManager(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = new QueryParserTokenManager(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 25; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(CharStream stream) {
-    token_source.ReInit(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_lookingAhead = false;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source.ReInit(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_lookingAhead = false;
+     jj_gen = 0;
+     for (int i = 0; i < 25; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Constructor with generated Token Manager. */
   protected QueryParser(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 25; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 25; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      if (++jj_gc > 100) {
-        jj_gc = 0;
-        for (int i = 0; i < jj_2_rtns.length; i++) {
-          JJCalls c = jj_2_rtns[i];
-          while (c != null) {
-            if (c.gen < jj_gen) c.first = null;
-            c = c.next;
-          }
-        }
-      }
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
+     Token oldToken;
+     if ((oldToken = token).next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     if (token.kind == kind) {
+       jj_gen++;
+       if (++jj_gc > 100) {
+         jj_gc = 0;
+         for (int i = 0; i < jj_2_rtns.length; i++) {
+           JJCalls c = jj_2_rtns[i];
+           while (c != null) {
+             if (c.gen < jj_gen) c.first = null;
+             c = c.next;
+           }
+         }
+       }
+       return token;
+     }
+     token = oldToken;
+     jj_kind = kind;
+     throw generateParseException();
   }
 
+  @SuppressWarnings("serial")
   static private final class LookaheadSuccess extends java.lang.Error { }
   final private LookaheadSuccess jj_ls = new LookaheadSuccess();
   private boolean jj_scan_token(int kind) {
-    if (jj_scanpos == jj_lastpos) {
-      jj_la--;
-      if (jj_scanpos.next == null) {
-        jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
-      } else {
-        jj_lastpos = jj_scanpos = jj_scanpos.next;
-      }
-    } else {
-      jj_scanpos = jj_scanpos.next;
-    }
-    if (jj_rescan) {
-      int i = 0; Token tok = token;
-      while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
-      if (tok != null) jj_add_error_token(kind, i);
-    }
-    if (jj_scanpos.kind != kind) return true;
-    if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
-    return false;
+     if (jj_scanpos == jj_lastpos) {
+       jj_la--;
+       if (jj_scanpos.next == null) {
+         jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
+       } else {
+         jj_lastpos = jj_scanpos = jj_scanpos.next;
+       }
+     } else {
+       jj_scanpos = jj_scanpos.next;
+     }
+     if (jj_rescan) {
+       int i = 0; Token tok = token;
+       while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
+       if (tok != null) jj_add_error_token(kind, i);
+     }
+     if (jj_scanpos.kind != kind) return true;
+     if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
+     return false;
   }
 
 
 /** Get the next Token. */
   final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
+     if (token.next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     jj_gen++;
+     return token;
   }
 
 /** Get the specific Token. */
   final public Token getToken(int index) {
-    Token t = jj_lookingAhead ? jj_scanpos : token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
+     Token t = jj_lookingAhead ? jj_scanpos : token;
+     for (int i = 0; i < index; i++) {
+       if (t.next != null) t = t.next;
+       else t = t.next = token_source.getNextToken();
+     }
+     return t;
   }
 
-  private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
+  private int jj_ntk_f() {
+     if ((jj_nt=token.next) == null)
+       return (jj_ntk = (token.next=token_source.getNextToken()).kind);
+     else
+       return (jj_ntk = jj_nt.kind);
   }
 
   private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
@@ -870,65 +923,86 @@
   private int jj_endpos;
 
   private void jj_add_error_token(int kind, int pos) {
-    if (pos >= 100) return;
-    if (pos == jj_endpos + 1) {
-      jj_lasttokens[jj_endpos++] = kind;
-    } else if (jj_endpos != 0) {
-      jj_expentry = new int[jj_endpos];
-      for (int i = 0; i < jj_endpos; i++) {
-        jj_expentry[i] = jj_lasttokens[i];
-      }
-      jj_entries_loop: for (java.util.Iterator<?> it = jj_expentries.iterator(); it.hasNext();) {
-        int[] oldentry = (int[])(it.next());
-        if (oldentry.length == jj_expentry.length) {
-          for (int i = 0; i < jj_expentry.length; i++) {
-            if (oldentry[i] != jj_expentry[i]) {
-              continue jj_entries_loop;
-            }
-          }
-          jj_expentries.add(jj_expentry);
-          break jj_entries_loop;
-        }
-      }
-      if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
-    }
+     if (pos >= 100) {
+        return;
+     }
+
+     if (pos == jj_endpos + 1) {
+       jj_lasttokens[jj_endpos++] = kind;
+     } else if (jj_endpos != 0) {
+       jj_expentry = new int[jj_endpos];
+
+       for (int i = 0; i < jj_endpos; i++) {
+         jj_expentry[i] = jj_lasttokens[i];
+       }
+
+       for (int[] oldentry : jj_expentries) {
+         if (oldentry.length == jj_expentry.length) {
+           boolean isMatched = true;
+
+           for (int i = 0; i < jj_expentry.length; i++) {
+             if (oldentry[i] != jj_expentry[i]) {
+               isMatched = false;
+               break;
+             }
+
+           }
+           if (isMatched) {
+             jj_expentries.add(jj_expentry);
+             break;
+           }
+         }
+       }
+
+       if (pos != 0) {
+         jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+       }
+     }
   }
 
   /** Generate ParseException. */
   public ParseException generateParseException() {
-    jj_expentries.clear();
-    boolean[] la1tokens = new boolean[33];
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 25; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-          if ((jj_la1_1[i] & (1<<j)) != 0) {
-            la1tokens[32+j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 33; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.add(jj_expentry);
-      }
-    }
-    jj_endpos = 0;
-    jj_rescan_token();
-    jj_add_error_token(0, 0);
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.get(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
+     jj_expentries.clear();
+     boolean[] la1tokens = new boolean[33];
+     if (jj_kind >= 0) {
+       la1tokens[jj_kind] = true;
+       jj_kind = -1;
+     }
+     for (int i = 0; i < 25; i++) {
+       if (jj_la1[i] == jj_gen) {
+         for (int j = 0; j < 32; j++) {
+           if ((jj_la1_0[i] & (1<<j)) != 0) {
+             la1tokens[j] = true;
+           }
+           if ((jj_la1_1[i] & (1<<j)) != 0) {
+             la1tokens[32+j] = true;
+           }
+         }
+       }
+     }
+     for (int i = 0; i < 33; i++) {
+       if (la1tokens[i]) {
+         jj_expentry = new int[1];
+         jj_expentry[0] = i;
+         jj_expentries.add(jj_expentry);
+       }
+     }
+     jj_endpos = 0;
+     jj_rescan_token();
+     jj_add_error_token(0, 0);
+     int[][] exptokseq = new int[jj_expentries.size()][];
+     for (int i = 0; i < jj_expentries.size(); i++) {
+       exptokseq[i] = jj_expentries.get(i);
+     }
+     return new ParseException(token, exptokseq, tokenImage);
+  }
+
+  private int trace_indent = 0;
+  private boolean trace_enabled;
+
+/** Trace enabled. */
+  final public boolean trace_enabled() {
+     return trace_enabled;
   }
 
   /** Enable tracing. */
@@ -940,40 +1014,45 @@
   }
 
   private void jj_rescan_token() {
-    jj_rescan = true;
-    for (int i = 0; i < 3; i++) {
-    try {
-      JJCalls p = jj_2_rtns[i];
-      do {
-        if (p.gen > jj_gen) {
-          jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
-          switch (i) {
-            case 0: jj_3_1(); break;
-            case 1: jj_3_2(); break;
-            case 2: jj_3_3(); break;
-          }
-        }
-        p = p.next;
-      } while (p != null);
-      } catch(LookaheadSuccess ls) { }
-    }
-    jj_rescan = false;
+     jj_rescan = true;
+     for (int i = 0; i < 3; i++) {
+       try {
+         JJCalls p = jj_2_rtns[i];
+
+         do {
+           if (p.gen > jj_gen) {
+             jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+             switch (i) {
+               case 0: jj_3_1(); break;
+               case 1: jj_3_2(); break;
+               case 2: jj_3_3(); break;
+             }
+           }
+           p = p.next;
+         } while (p != null);
+
+         } catch(LookaheadSuccess ls) { }
+     }
+     jj_rescan = false;
   }
 
   private void jj_save(int index, int xla) {
-    JJCalls p = jj_2_rtns[index];
-    while (p.gen > jj_gen) {
-      if (p.next == null) { p = p.next = new JJCalls(); break; }
-      p = p.next;
-    }
-    p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+     JJCalls p = jj_2_rtns[index];
+     while (p.gen > jj_gen) {
+       if (p.next == null) { p = p.next = new JJCalls(); break; }
+       p = p.next;
+     }
+
+     p.gen = jj_gen + xla - jj_la; 
+     p.first = token;
+     p.arg = xla;
   }
 
   static final class JJCalls {
-    int gen;
-    Token first;
-    int arg;
-    JJCalls next;
+     int gen;
+     Token first;
+     int arg;
+     JJCalls next;
   }
 
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
index 39cac02..db84cc7 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
@@ -1,20 +1,34 @@
+/* QueryParserTokenManager.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
 package org.apache.lucene.queryparser.classic;
-/** Token Manager. */
-public class QueryParserTokenManager implements QueryParserConstants
-{
 
-  
-private final int jjStopStringLiteralDfa_2(int pos, long active0)
-{
+
+
+
+
+
+
+
+
+
+
+
+
+/** Token Manager. */
+public class QueryParserTokenManager implements QueryParserConstants {
+
+  /** Debug output. */
+  // (debugStream omitted).
+  /** Set debug output. */
+  // (setDebugStream omitted).
+private final int jjStopStringLiteralDfa_2(int pos, long active0){
    switch (pos)
    {
       default :
          return -1;
    }
 }
-private final int jjStartNfa_2(int pos, long active0)
-{
+private final int jjStartNfa_2(int pos, long active0){
    return jjMoveNfa_2(jjStopStringLiteralDfa_2(pos, active0), pos + 1);
 }
 private int jjStopAtPos(int pos, int kind)
@@ -23,8 +37,7 @@
    jjmatchedPos = pos;
    return pos + 1;
 }
-private int jjMoveStringLiteralDfa0_2()
-{
+private int jjMoveStringLiteralDfa0_2(){
    switch(curChar)
    {
       case 40:
@@ -93,14 +106,14 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 0:
                   if ((0xfbff54f8ffffd9ffL & l) != 0L)
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(33, 34);
+                     { jjCheckNAddTwoStates(33, 34); }
                   }
                   else if ((0x100002600L & l) != 0L)
                   {
@@ -110,14 +123,14 @@
                   else if ((0x280200000000L & l) != 0L)
                      jjstateSet[jjnewStateCnt++] = 15;
                   else if (curChar == 47)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   else if (curChar == 34)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   if ((0x7bff50f8ffffd9ffL & l) != 0L)
                   {
                      if (kind > 20)
                         kind = 20;
-                     jjCheckNAddStates(6, 10);
+                     { jjCheckNAddStates(6, 10); }
                   }
                   else if (curChar == 42)
                   {
@@ -154,14 +167,14 @@
                   break;
                case 16:
                   if (curChar == 34)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 17:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 19:
-                  jjCheckNAddStates(3, 5);
+                  { jjCheckNAddStates(3, 5); }
                   break;
                case 20:
                   if (curChar == 34 && kind > 19)
@@ -172,42 +185,42 @@
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddStates(11, 14);
+                  { jjCheckNAddStates(11, 14); }
                   break;
                case 23:
                   if (curChar == 46)
-                     jjCheckNAdd(24);
+                     { jjCheckNAdd(24); }
                   break;
                case 24:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddStates(15, 17);
+                  { jjCheckNAddStates(15, 17); }
                   break;
                case 25:
                   if ((0x7bff78f8ffffd9ffL & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 27:
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 28:
                   if ((0x7bff78f8ffffd9ffL & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 30:
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 31:
                   if (curChar == 42 && kind > 22)
@@ -218,21 +231,21 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 35:
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 36:
                case 38:
                   if (curChar == 47)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 37:
                   if ((0xffff7fffffffffffL & l) != 0L)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 40:
                   if (curChar == 47 && kind > 24)
@@ -243,26 +256,26 @@
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddStates(6, 10);
+                  { jjCheckNAddStates(6, 10); }
                   break;
                case 42:
                   if ((0x7bff78f8ffffd9ffL & l) == 0L)
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 44:
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 45:
                   if ((0x7bff78f8ffffd9ffL & l) != 0L)
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 47:
-                  jjCheckNAddStates(18, 20);
+                  { jjCheckNAddStates(18, 20); }
                   break;
                default : break;
             }
@@ -280,31 +293,31 @@
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(33, 34);
+                     { jjCheckNAddTwoStates(33, 34); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAdd(35);
+                     { jjCheckNAdd(35); }
                   break;
                case 0:
                   if ((0x97ffffff87ffffffL & l) != 0L)
                   {
                      if (kind > 20)
                         kind = 20;
-                     jjCheckNAddStates(6, 10);
+                     { jjCheckNAddStates(6, 10); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAddStates(21, 23);
+                     { jjCheckNAddStates(21, 23); }
                   else if (curChar == 126)
                   {
                      if (kind > 21)
                         kind = 21;
-                     jjCheckNAddStates(24, 26);
+                     { jjCheckNAddStates(24, 26); }
                   }
                   if ((0x97ffffff87ffffffL & l) != 0L)
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(33, 34);
+                     { jjCheckNAddTwoStates(33, 34); }
                   }
                   if (curChar == 78)
                      jjstateSet[jjnewStateCnt++] = 11;
@@ -357,28 +370,28 @@
                   break;
                case 17:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 18:
                   if (curChar == 92)
                      jjstateSet[jjnewStateCnt++] = 19;
                   break;
                case 19:
-                  jjCheckNAddStates(3, 5);
+                  { jjCheckNAddStates(3, 5); }
                   break;
                case 21:
                   if (curChar != 126)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddStates(24, 26);
+                  { jjCheckNAddStates(24, 26); }
                   break;
                case 25:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 26:
                   if (curChar == 92)
@@ -387,14 +400,14 @@
                case 27:
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 28:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 29:
                   if (curChar == 92)
@@ -403,33 +416,33 @@
                case 30:
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 32:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 33:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 34:
                   if (curChar == 92)
-                     jjCheckNAdd(35);
+                     { jjCheckNAdd(35); }
                   break;
                case 35:
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 37:
-                  jjAddStates(0, 2);
+                  { jjAddStates(0, 2); }
                   break;
                case 39:
                   if (curChar == 92)
@@ -440,38 +453,38 @@
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddStates(6, 10);
+                  { jjCheckNAddStates(6, 10); }
                   break;
                case 42:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 43:
                   if (curChar == 92)
-                     jjCheckNAdd(44);
+                     { jjCheckNAdd(44); }
                   break;
                case 44:
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 45:
                   if ((0x97ffffff87ffffffL & l) != 0L)
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 46:
                   if (curChar == 92)
-                     jjCheckNAdd(47);
+                     { jjCheckNAdd(47); }
                   break;
                case 47:
-                  jjCheckNAddStates(18, 20);
+                  { jjCheckNAddStates(18, 20); }
                   break;
                case 48:
                   if (curChar == 92)
-                     jjCheckNAddStates(21, 23);
+                     { jjCheckNAddStates(21, 23); }
                   break;
                default : break;
             }
@@ -479,7 +492,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -494,7 +507,7 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 0:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
@@ -506,13 +519,13 @@
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(33, 34);
+                     { jjCheckNAddTwoStates(33, 34); }
                   }
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
                   {
                      if (kind > 20)
                         kind = 20;
-                     jjCheckNAddStates(6, 10);
+                     { jjCheckNAddStates(6, 10); }
                   }
                   break;
                case 15:
@@ -522,84 +535,84 @@
                case 17:
                case 19:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 25:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 27:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(25, 26);
+                  { jjCheckNAddTwoStates(25, 26); }
                   break;
                case 28:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 30:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(28, 29);
+                  { jjCheckNAddTwoStates(28, 29); }
                   break;
                case 32:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 35:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(33, 34);
+                  { jjCheckNAddTwoStates(33, 34); }
                   break;
                case 37:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjAddStates(0, 2);
+                     { jjAddStates(0, 2); }
                   break;
                case 41:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddStates(6, 10);
+                  { jjCheckNAddStates(6, 10); }
                   break;
                case 42:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 44:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 20)
                      kind = 20;
-                  jjCheckNAddTwoStates(42, 43);
+                  { jjCheckNAddTwoStates(42, 43); }
                   break;
                case 45:
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 47:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -643,18 +656,18 @@
                      break;
                   if (kind > 27)
                      kind = 27;
-                  jjAddStates(27, 28);
+                  { jjAddStates(27, 28); }
                   break;
                case 1:
                   if (curChar == 46)
-                     jjCheckNAdd(2);
+                     { jjCheckNAdd(2); }
                   break;
                case 2:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 27)
                      kind = 27;
-                  jjCheckNAdd(2);
+                  { jjCheckNAdd(2); }
                   break;
                default : break;
             }
@@ -673,7 +686,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -682,7 +695,7 @@
          {
             switch(jjstateSet[--i])
             {
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -699,8 +712,7 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
-private final int jjStopStringLiteralDfa_1(int pos, long active0)
-{
+private final int jjStopStringLiteralDfa_1(int pos, long active0){
    switch (pos)
    {
       case 0:
@@ -714,12 +726,10 @@
          return -1;
    }
 }
-private final int jjStartNfa_1(int pos, long active0)
-{
+private final int jjStartNfa_1(int pos, long active0){
    return jjMoveNfa_1(jjStopStringLiteralDfa_1(pos, active0), pos + 1);
 }
-private int jjMoveStringLiteralDfa0_1()
-{
+private int jjMoveStringLiteralDfa0_1(){
    switch(curChar)
    {
       case 84:
@@ -732,8 +742,7 @@
          return jjMoveNfa_1(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_1(long active0)
-{
+private int jjMoveStringLiteralDfa1_1(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_1(0, active0);
@@ -781,7 +790,7 @@
                   {
                      if (kind > 32)
                         kind = 32;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   if ((0x100002600L & l) != 0L)
                   {
@@ -789,19 +798,19 @@
                         kind = 7;
                   }
                   else if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 1:
                   if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 2:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(29, 31);
+                     { jjCheckNAddStates(29, 31); }
                   break;
                case 3:
                   if (curChar == 34)
-                     jjCheckNAddStates(29, 31);
+                     { jjCheckNAddStates(29, 31); }
                   break;
                case 5:
                   if (curChar == 34 && kind > 31)
@@ -812,7 +821,7 @@
                      break;
                   if (kind > 32)
                      kind = 32;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                default : break;
             }
@@ -831,10 +840,10 @@
                      break;
                   if (kind > 32)
                      kind = 32;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                case 2:
-                  jjAddStates(29, 31);
+                  { jjAddStates(29, 31); }
                   break;
                case 4:
                   if (curChar == 92)
@@ -846,7 +855,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -865,21 +874,21 @@
                   {
                      if (kind > 32)
                         kind = 32;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   break;
                case 2:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjAddStates(29, 31);
+                     { jjAddStates(29, 31); }
                   break;
                case 6:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 32)
                      kind = 32;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -896,6 +905,37 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
+
+/** Token literal values. */
+public static final String[] jjstrLiteralImages = {
+"", null, null, null, null, null, null, null, null, null, null, "\53", "\55", 
+null, "\50", "\51", "\72", "\52", "\136", null, null, null, null, null, null, 
+"\133", "\173", null, "\124\117", "\135", "\175", null, null, };
+protected Token jjFillToken()
+{
+   final Token t;
+   final String curTokenImage;
+   final int beginLine;
+   final int endLine;
+   final int beginColumn;
+   final int endColumn;
+   String im = jjstrLiteralImages[jjmatchedKind];
+   curTokenImage = (im == null) ? input_stream.GetImage() : im;
+   beginLine = input_stream.getBeginLine();
+   beginColumn = input_stream.getBeginColumn();
+   endLine = input_stream.getEndLine();
+   endColumn = input_stream.getEndColumn();
+   t = Token.newToken(jjmatchedKind);
+   t.kind = jjmatchedKind;
+   t.image = curTokenImage;
+
+   t.beginLine = beginLine;
+   t.endLine = endLine;
+   t.beginColumn = beginColumn;
+   t.endColumn = endColumn;
+
+   return t;
+}
 static final int[] jjnextStates = {
    37, 39, 40, 17, 18, 20, 42, 43, 45, 46, 31, 22, 23, 25, 26, 24, 
    25, 26, 45, 46, 31, 44, 47, 35, 22, 28, 29, 0, 1, 2, 4, 5, 
@@ -937,101 +977,6 @@
    }
 }
 
-/** Token literal values. */
-public static final String[] jjstrLiteralImages = {
-"", null, null, null, null, null, null, null, null, null, null, "\53", "\55", 
-null, "\50", "\51", "\72", "\52", "\136", null, null, null, null, null, null, 
-"\133", "\173", null, "\124\117", "\135", "\175", null, null, };
-
-/** Lexer state names. */
-public static final String[] lexStateNames = {
-   "Boost",
-   "Range",
-   "DEFAULT",
-};
-
-/** Lex State array. */
-public static final int[] jjnewLexState = {
-   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 
-   1, 1, 2, -1, 2, 2, -1, -1, 
-};
-static final long[] jjtoToken = {
-   0x1ffffff01L, 
-};
-static final long[] jjtoSkip = {
-   0x80L, 
-};
-protected CharStream input_stream;
-private final int[] jjrounds = new int[49];
-private final int[] jjstateSet = new int[98];
-protected char curChar;
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream){
-   input_stream = stream;
-}
-
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream, int lexState){
-   this(stream);
-   SwitchTo(lexState);
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream)
-{
-   jjmatchedPos = jjnewStateCnt = 0;
-   curLexState = defaultLexState;
-   input_stream = stream;
-   ReInitRounds();
-}
-private void ReInitRounds()
-{
-   int i;
-   jjround = 0x80000001;
-   for (i = 49; i-- > 0;)
-      jjrounds[i] = 0x80000000;
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream, int lexState)
-{
-   ReInit(stream);
-   SwitchTo(lexState);
-}
-
-/** Switch to specified lex state. */
-public void SwitchTo(int lexState)
-{
-   if (lexState >= 3 || lexState < 0)
-      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
-   else
-      curLexState = lexState;
-}
-
-protected Token jjFillToken()
-{
-   final Token t;
-   final String curTokenImage;
-   final int beginLine;
-   final int endLine;
-   final int beginColumn;
-   final int endColumn;
-   String im = jjstrLiteralImages[jjmatchedKind];
-   curTokenImage = (im == null) ? input_stream.GetImage() : im;
-   beginLine = input_stream.getBeginLine();
-   beginColumn = input_stream.getBeginColumn();
-   endLine = input_stream.getEndLine();
-   endColumn = input_stream.getEndColumn();
-   t = Token.newToken(jjmatchedKind, curTokenImage);
-
-   t.beginLine = beginLine;
-   t.endLine = endLine;
-   t.beginColumn = beginColumn;
-   t.endColumn = endColumn;
-
-   return t;
-}
-
 int curLexState = 2;
 int defaultLexState = 2;
 int jjnewStateCnt;
@@ -1052,9 +997,10 @@
    {
       curChar = input_stream.BeginToken();
    }
-   catch(java.io.IOException e)
+   catch(Exception e)
    {
       jjmatchedKind = 0;
+      jjmatchedPos = -1;
       matchedToken = jjFillToken();
       return matchedToken;
    }
@@ -1118,6 +1064,31 @@
   }
 }
 
+void SkipLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void MoreLexicalActions()
+{
+   jjimageLen += (lengthOfMatch = jjmatchedPos + 1);
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void TokenLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
 private void jjCheckNAdd(int state)
 {
    if (jjrounds[state] != jjround)
@@ -1145,4 +1116,90 @@
    } while (start++ != end);
 }
 
+    /** Constructor. */
+    public QueryParserTokenManager(CharStream stream){
+
+
+    input_stream = stream;
+  }
+
+  /** Constructor. */
+  public QueryParserTokenManager (CharStream stream, int lexState){
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Reinitialise parser. */
+  
+  public void ReInit(CharStream stream)
+  {
+
+
+    jjmatchedPos =
+    jjnewStateCnt =
+    0;
+    curLexState = defaultLexState;
+    input_stream = stream;
+    ReInitRounds();
+  }
+
+  private void ReInitRounds()
+  {
+    int i;
+    jjround = 0x80000001;
+    for (i = 49; i-- > 0;)
+      jjrounds[i] = 0x80000000;
+  }
+
+  /** Reinitialise parser. */
+  public void ReInit(CharStream stream, int lexState)
+  
+  {
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Switch to specified lex state. */
+  public void SwitchTo(int lexState)
+  {
+    if (lexState >= 3 || lexState < 0)
+      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+    else
+      curLexState = lexState;
+  }
+
+
+/** Lexer state names. */
+public static final String[] lexStateNames = {
+   "Boost",
+   "Range",
+   "DEFAULT",
+};
+
+/** Lex State array. */
+public static final int[] jjnewLexState = {
+   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 
+   1, 1, 2, -1, 2, 2, -1, -1, 
+};
+static final long[] jjtoToken = {
+   0x1ffffff01L, 
+};
+static final long[] jjtoSkip = {
+   0x80L, 
+};
+static final long[] jjtoSpecial = {
+   0x0L, 
+};
+static final long[] jjtoMore = {
+   0x0L, 
+};
+    protected CharStream  input_stream;
+
+    private final int[] jjrounds = new int[49];
+    private final int[] jjstateSet = new int[2 * 49];
+    private final StringBuilder jjimage = new StringBuilder();
+    private StringBuilder image = jjimage;
+    private int jjimageLen;
+    private int lengthOfMatch;
+    protected int curChar;
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java
index 0e52ec2..1b9ccd8 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. Token.java Version 5.0 */
-/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
+/* Generated By:JavaCC: Do not edit this line. Token.java Version 7.0 */
+/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.classic;
 
 /**
@@ -97,6 +97,7 @@
   /**
    * Returns the image.
    */
+  @Override
   public String toString()
   {
     return image;
@@ -128,4 +129,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=405bb5d2fcd84e94ac1c8f0b12c1f914 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java
index ad111d0..4f07f36 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 7.0 */
 /* JavaCCOptions: */
 package org.apache.lucene.queryparser.classic;
 
@@ -20,22 +20,22 @@
   /**
    * Lexical error occurred.
    */
-  static final int LEXICAL_ERROR = 0;
+  public static final int LEXICAL_ERROR = 0;
 
   /**
    * An attempt was made to create a second instance of a static token manager.
    */
-  static final int STATIC_LEXER_ERROR = 1;
+  public static final int STATIC_LEXER_ERROR = 1;
 
   /**
    * Tried to change to an invalid lexical state.
    */
-  static final int INVALID_LEXICAL_STATE = 2;
+  public static final int INVALID_LEXICAL_STATE = 2;
 
   /**
    * Detected (and bailed out of) an infinite loop in the token manager.
    */
-  static final int LOOP_DETECTED = 3;
+  public static final int LOOP_DETECTED = 3;
 
   /**
    * Indicates the reason why the exception is thrown. It will have
@@ -53,8 +53,6 @@
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
       {
-        case 0 :
-          continue;
         case '\b':
           retval.append("\\b");
           continue;
@@ -104,11 +102,12 @@
    *    curchar     : the offending character
    * Note: You can customize the lexical error message by modifying this method.
    */
-  protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
+  protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) {
+    char curChar1 = (char)curChar;
     return("Lexical error at line " +
           errorLine + ", column " +
           errorColumn + ".  Encountered: " +
-          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
+          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + curChar + "), ") +
           "after : \"" + addEscapes(errorAfter) + "\"");
   }
 
@@ -121,6 +120,7 @@
    *
    * from this method for such cases in the release version of your parser.
    */
+  @Override
   public String getMessage() {
     return super.getMessage();
   }
@@ -140,8 +140,8 @@
   }
 
   /** Full Constructor. */
-  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
-    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
+  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) {
+    this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=f433e1a52b8eadbf12f3fbbbf87fd140 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java
index 2dee00b..575be8e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 7.0 */
 /* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.flexible.standard.parser;
 
@@ -111,5 +111,10 @@
    */
   void Done();
 
+
+  void setTabSize(int i);
+  int getTabSize();
+  boolean getTrackLineColumn();
+  void setTrackLineColumn(boolean trackLineColumn);
 }
-/* JavaCC - OriginalChecksum=53b2ec7502d50e2290e86187a6c01270 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java
index df27356..7b206fc 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/FastCharStream.java
@@ -138,4 +138,24 @@
   public final int getBeginLine() {
     return 1;
   }
+
+  @Override
+  public void setTabSize(int i) {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public int getTabSize() {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public boolean getTrackLineColumn() {
+    return false;
+  }
+
+  @Override
+  public void setTrackLineColumn(boolean trackLineColumn) {
+    throw new RuntimeException("Line/Column tracking not implemented.");
+  }
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
index 07ebe1b..a782e8b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
@@ -1,11 +1,11 @@
-/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 5.0 */
-/* JavaCCOptions:KEEP_LINE_COL=null */
-package org.apache.lucene.queryparser.flexible.standard.parser;
- 
- import org.apache.lucene.queryparser.flexible.messages.Message;
- import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
- import org.apache.lucene.queryparser.flexible.core.*;
- import org.apache.lucene.queryparser.flexible.core.messages.*;
+/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */
+/* JavaCCOptions:KEEP_LINE_COLUMN=true */
+          package org.apache.lucene.queryparser.flexible.standard.parser;
+
+          import org.apache.lucene.queryparser.flexible.messages.*;
+          import org.apache.lucene.queryparser.flexible.core.*;
+          import org.apache.lucene.queryparser.flexible.core.messages.*;
+          
 
 /**
  * This exception is thrown when parse errors are encountered.
@@ -26,19 +26,26 @@
   private static final long serialVersionUID = 1L;
 
   /**
+   * The end of line string for this machine.
+   */
+  protected static String EOL = System.getProperty("line.separator", "\n");
+
+  /**
    * This constructor is used by the method "generateParseException"
    * in the generated parser.  Calling this constructor generates
    * a new object of this type with the fields "currentToken",
    * "expectedTokenSequences", and "tokenImage" set.
    */
-  public ParseException(Token currentTokenVal,
-     int[][] expectedTokenSequencesVal, String[] tokenImageVal) {
-     super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(
-     currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));
-     this.currentToken = currentTokenVal;
-     this.expectedTokenSequences = expectedTokenSequencesVal;
-     this.tokenImage = tokenImageVal;
-   }
+          public ParseException(Token currentTokenVal,
+            int[][] expectedTokenSequencesVal, String[] tokenImageVal) 
+          {
+            super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(
+            currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));
+            this.currentToken = currentTokenVal;
+            this.expectedTokenSequences = expectedTokenSequencesVal;
+            this.tokenImage = tokenImageVal;
+          }
+          
 
   /**
    * The following constructors are for use by you for whatever
@@ -50,14 +57,18 @@
    * these constructors.
    */
 
-  public ParseException() {
-     super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, "Error"));
-   }
+          public ParseException() 
+          {
+            super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, "Error"));
+          }
+          
 
   /** Constructor with message. */
-  public ParseException(Message message) {
-     super(message);
-   }
+          public ParseException(Message message) 
+          {
+            super(message);
+          }
+          
 
 
   /**
@@ -91,7 +102,7 @@
   private static String initialise(Token currentToken,
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
-    String eol = System.getProperty("line.separator", "\n");
+
     StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
@@ -104,7 +115,7 @@
       if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
         expected.append("...");
       }
-      expected.append(eol).append("    ");
+      expected.append(EOL).append("    ");
     }
     String retval = "Encountered \"";
     Token tok = currentToken.next;
@@ -121,20 +132,23 @@
       tok = tok.next;
     }
     retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
-    retval += "." + eol;
-    if (expectedTokenSequences.length == 1) {
-      retval += "Was expecting:" + eol + "    ";
+    retval += "." + EOL;
+    
+    
+    if (expectedTokenSequences.length == 0) {
+        // Nothing to add here
     } else {
-      retval += "Was expecting one of:" + eol + "    ";
+        if (expectedTokenSequences.length == 1) {
+          retval += "Was expecting:" + EOL + "    ";
+        } else {
+          retval += "Was expecting one of:" + EOL + "    ";
+        }
+        retval += expected.toString();
     }
-    retval += expected.toString();
+    
     return retval;
   }
 
-  /**
-   * The end of line string for this machine.
-   */
-  protected String eol = System.getProperty("line.separator", "\n");
 
   /**
    * Used to convert raw characters to their escaped version
@@ -147,8 +161,6 @@
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
         {
-           case 0 :
-              continue;
            case '\b':
               retval.append("\\b");
               continue;
@@ -187,4 +199,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=4263a02db9988d7a863aa97ad2f6dc67 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
index 57d7231..66500fa 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
@@ -1,3 +1,4 @@
+/* StandardSyntaxParser.java */
 /* Generated By:JavaCC: Do not edit this line. StandardSyntaxParser.java */
 package org.apache.lucene.queryparser.flexible.standard.parser;
 
@@ -75,47 +76,49 @@
       }
     }
 
-  final public ModifierQueryNode.Modifier Modifiers() throws ParseException {
-  ModifierQueryNode.Modifier ret = ModifierQueryNode.Modifier.MOD_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+  final public ModifierQueryNode.Modifier Modifiers() throws ParseException {ModifierQueryNode.Modifier ret = ModifierQueryNode.Modifier.MOD_NONE;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case NOT:
     case PLUS:
-    case MINUS:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case PLUS:
+    case MINUS:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case PLUS:{
         jj_consume_token(PLUS);
-              ret = ModifierQueryNode.Modifier.MOD_REQ;
+ret = ModifierQueryNode.Modifier.MOD_REQ;
         break;
-      case MINUS:
+        }
+      case MINUS:{
         jj_consume_token(MINUS);
-                 ret = ModifierQueryNode.Modifier.MOD_NOT;
+ret = ModifierQueryNode.Modifier.MOD_NOT;
         break;
-      case NOT:
+        }
+      case NOT:{
         jj_consume_token(NOT);
-               ret = ModifierQueryNode.Modifier.MOD_NOT;
+ret = ModifierQueryNode.Modifier.MOD_NOT;
         break;
+        }
       default:
         jj_la1[0] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       break;
+      }
     default:
       jj_la1[1] = jj_gen;
       ;
     }
-    {if (true) return ret;}
+{if ("" != null) return ret;}
     throw new Error("Missing return statement in function");
-  }
+}
 
 // This makes sure that there is no garbage after the query string
-  final public QueryNode TopLevelQuery(CharSequence field) throws ParseException {
-  QueryNode q;
+  final public QueryNode TopLevelQuery(CharSequence field) throws ParseException {QueryNode q;
     q = Query(field);
     jj_consume_token(0);
-     {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
 // These changes were made to introduce operator precedence:
 // - Clause() now returns a QueryNode. 
@@ -127,13 +130,14 @@
 //   DisjQuery ::= ConjQuery ( OR ConjQuery )* 
 //   ConjQuery ::= Clause ( AND Clause )*
 //      Clause ::= [ Modifier ] ... 
-  final public QueryNode Query(CharSequence field) throws ParseException {
-  Vector<QueryNode> clauses = null;
+  final public 
+
+QueryNode Query(CharSequence field) throws ParseException {Vector<QueryNode> clauses = null;
   QueryNode c, first=null;
     first = DisjQuery(field);
     label_1:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case NOT:
       case PLUS:
       case MINUS:
@@ -143,22 +147,23 @@
       case REGEXPTERM:
       case RANGEIN_START:
       case RANGEEX_START:
-      case NUMBER:
+      case NUMBER:{
         ;
         break;
+        }
       default:
         jj_la1[2] = jj_gen;
         break label_1;
       }
       c = DisjQuery(field);
-       if (clauses == null) {
+if (clauses == null) {
            clauses = new Vector<QueryNode>();
            clauses.addElement(first);
         }
         clauses.addElement(c);
     }
-        if (clauses != null) {
-        {if (true) return new BooleanQueryNode(clauses);}
+if (clauses != null) {
+        {if ("" != null) return new BooleanQueryNode(clauses);}
       } else {
           // Handle the case of a "pure" negation query which
           // needs to be wrapped as a boolean query, otherwise
@@ -166,73 +171,73 @@
           if (first instanceof ModifierQueryNode) {
             ModifierQueryNode m = (ModifierQueryNode) first;
             if (m.getModifier() == ModifierQueryNode.Modifier.MOD_NOT) {
-              {if (true) return new BooleanQueryNode(Arrays.<QueryNode> asList(m));}
+              {if ("" != null) return new BooleanQueryNode(Arrays.asList(m));}
             }
           }
-          {if (true) return first;}
+          {if ("" != null) return first;}
       }
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public QueryNode DisjQuery(CharSequence field) throws ParseException {
-  QueryNode first, c;
+  final public QueryNode DisjQuery(CharSequence field) throws ParseException {QueryNode first, c;
   Vector<QueryNode> clauses = null;
     first = ConjQuery(field);
     label_2:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case OR:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case OR:{
         ;
         break;
+        }
       default:
         jj_la1[3] = jj_gen;
         break label_2;
       }
       jj_consume_token(OR);
       c = ConjQuery(field);
-     if (clauses == null) {
+if (clauses == null) {
          clauses = new Vector<QueryNode>();
          clauses.addElement(first);
      }
      clauses.addElement(c);
     }
-    if (clauses != null) {
-      {if (true) return new OrQueryNode(clauses);}
+if (clauses != null) {
+      {if ("" != null) return new OrQueryNode(clauses);}
     } else {
-        {if (true) return first;}
+        {if ("" != null) return first;}
     }
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public QueryNode ConjQuery(CharSequence field) throws ParseException {
-  QueryNode first, c;
+  final public QueryNode ConjQuery(CharSequence field) throws ParseException {QueryNode first, c;
   Vector<QueryNode> clauses = null;
     first = ModClause(field);
     label_3:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case AND:{
         ;
         break;
+        }
       default:
         jj_la1[4] = jj_gen;
         break label_3;
       }
       jj_consume_token(AND);
       c = ModClause(field);
-     if (clauses == null) {
+if (clauses == null) {
          clauses = new Vector<QueryNode>();
          clauses.addElement(first);
      }
      clauses.addElement(c);
     }
-    if (clauses != null) {
-      {if (true) return new AndQueryNode(clauses);}
+if (clauses != null) {
+      {if ("" != null) return new AndQueryNode(clauses);}
     } else {
-        {if (true) return first;}
+        {if ("" != null) return first;}
     }
     throw new Error("Missing return statement in function");
-  }
+}
 
 // QueryNode Query(CharSequence field) :
 // {
@@ -272,20 +277,19 @@
 //       }
 //     }
 // }
-  final public QueryNode ModClause(CharSequence field) throws ParseException {
-  QueryNode q;
+  final public 
+QueryNode ModClause(CharSequence field) throws ParseException {QueryNode q;
   ModifierQueryNode.Modifier mods;
     mods = Modifiers();
     q = Clause(field);
-        if (mods != ModifierQueryNode.Modifier.MOD_NONE) {
+if (mods != ModifierQueryNode.Modifier.MOD_NONE) {
            q = new ModifierQueryNode(q, mods);
         }
-        {if (true) return q;}
+        {if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public QueryNode Clause(CharSequence field) throws ParseException {
-  QueryNode q;
+  final public QueryNode Clause(CharSequence field) throws ParseException {QueryNode q;
   Token fieldToken=null, boost=null, operator=null, term=null;
   FieldQueryNode qLower, qUpper;
   boolean lowerInclusive, upperInclusive;
@@ -293,63 +297,73 @@
   boolean group = false;
     if (jj_2_2(3)) {
       fieldToken = jj_consume_token(TERM);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case OP_COLON:
-      case OP_EQUAL:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case OP_COLON:
+      case OP_EQUAL:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case OP_COLON:{
           jj_consume_token(OP_COLON);
           break;
-        case OP_EQUAL:
+          }
+        case OP_EQUAL:{
           jj_consume_token(OP_EQUAL);
           break;
+          }
         default:
           jj_la1[5] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
-                                 field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
+field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
         q = Term(field);
         break;
+        }
       case OP_LESSTHAN:
       case OP_LESSTHANEQ:
       case OP_MORETHAN:
-      case OP_MORETHANEQ:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case OP_LESSTHAN:
+      case OP_MORETHANEQ:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case OP_LESSTHAN:{
           operator = jj_consume_token(OP_LESSTHAN);
           break;
-        case OP_LESSTHANEQ:
+          }
+        case OP_LESSTHANEQ:{
           operator = jj_consume_token(OP_LESSTHANEQ);
           break;
-        case OP_MORETHAN:
+          }
+        case OP_MORETHAN:{
           operator = jj_consume_token(OP_MORETHAN);
           break;
-        case OP_MORETHANEQ:
+          }
+        case OP_MORETHANEQ:{
           operator = jj_consume_token(OP_MORETHANEQ);
           break;
+          }
         default:
           jj_la1[6] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
-                                                                                                               field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case TERM:
+field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case TERM:{
           term = jj_consume_token(TERM);
           break;
-        case QUOTED:
+          }
+        case QUOTED:{
           term = jj_consume_token(QUOTED);
           break;
-        case NUMBER:
+          }
+        case NUMBER:{
           term = jj_consume_token(NUMBER);
           break;
+          }
         default:
           jj_la1[7] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
-        if (term.kind == QUOTED) {
+if (term.kind == QUOTED) {
             term.image = term.image.substring(1, term.image.length()-1);
         }
         switch (operator.kind) {
@@ -395,75 +409,70 @@
         }
         q = new TermRangeQueryNode(qLower, qUpper, lowerInclusive, upperInclusive);
         break;
+        }
       default:
         jj_la1[8] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-    } else {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case LPAREN:
+    } else if (jj_2_3(3)) {
+      if (jj_2_1(3)) {
+        fieldToken = jj_consume_token(TERM);
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case OP_COLON:{
+          jj_consume_token(OP_COLON);
+          break;
+          }
+        case OP_EQUAL:{
+          jj_consume_token(OP_EQUAL);
+          break;
+          }
+        default:
+          jj_la1[9] = jj_gen;
+          jj_consume_token(-1);
+          throw new ParseException();
+        }
+field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
+      } else {
+        ;
+      }
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case QUOTED:
       case TERM:
       case REGEXPTERM:
       case RANGEIN_START:
       case RANGEEX_START:
-      case NUMBER:
-        if (jj_2_1(2)) {
-          fieldToken = jj_consume_token(TERM);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case OP_COLON:
-            jj_consume_token(OP_COLON);
-            break;
-          case OP_EQUAL:
-            jj_consume_token(OP_EQUAL);
-            break;
-          default:
-            jj_la1[9] = jj_gen;
-            jj_consume_token(-1);
-            throw new ParseException();
+      case NUMBER:{
+        q = Term(field);
+        break;
+        }
+      case LPAREN:{
+        jj_consume_token(LPAREN);
+        q = Query(field);
+        jj_consume_token(RPAREN);
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case CARAT:{
+          jj_consume_token(CARAT);
+          boost = jj_consume_token(NUMBER);
+          break;
           }
-                                 field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);
-        } else {
+        default:
+          jj_la1[10] = jj_gen;
           ;
         }
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case QUOTED:
-        case TERM:
-        case REGEXPTERM:
-        case RANGEIN_START:
-        case RANGEEX_START:
-        case NUMBER:
-          q = Term(field);
-          break;
-        case LPAREN:
-          jj_consume_token(LPAREN);
-          q = Query(field);
-          jj_consume_token(RPAREN);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case CARAT:
-            jj_consume_token(CARAT);
-            boost = jj_consume_token(NUMBER);
-            break;
-          default:
-            jj_la1[10] = jj_gen;
-            ;
-          }
-                                                                 group=true;
-          break;
-        default:
-          jj_la1[11] = jj_gen;
-          jj_consume_token(-1);
-          throw new ParseException();
-        }
+group=true;
         break;
+        }
       default:
-        jj_la1[12] = jj_gen;
+        jj_la1[11] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
+    } else {
+      jj_consume_token(-1);
+      throw new ParseException();
     }
-      if (boost != null) {
+if (boost != null) {
       float f = (float)1.0;
       try {
         f = Float.parseFloat(boost.image);
@@ -478,12 +487,11 @@
       }
       }
       if (group) { q = new GroupQueryNode(q);}
-      {if (true) return q;}
+      {if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public QueryNode Term(CharSequence field) throws ParseException {
-  Token term, boost=null, fuzzySlop=null, goop1, goop2;
+  final public QueryNode Term(CharSequence field) throws ParseException {Token term, boost=null, fuzzySlop=null, goop1, goop2;
   boolean fuzzy = false;
   boolean regexp = false;
   boolean startInc=false;
@@ -491,55 +499,61 @@
   QueryNode q =null;
   FieldQueryNode qLower, qUpper;
   float defaultMinSimilarity = org.apache.lucene.search.FuzzyQuery.defaultMaxEdits;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case TERM:
     case REGEXPTERM:
-    case NUMBER:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
+    case NUMBER:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case TERM:{
         term = jj_consume_token(TERM);
-                    q = new FieldQueryNode(field, EscapeQuerySyntaxImpl.discardEscapeChar(term.image), term.beginColumn, term.endColumn);
+q = new FieldQueryNode(field, EscapeQuerySyntaxImpl.discardEscapeChar(term.image), term.beginColumn, term.endColumn);
         break;
-      case REGEXPTERM:
+        }
+      case REGEXPTERM:{
         term = jj_consume_token(REGEXPTERM);
-                             regexp=true;
+regexp=true;
         break;
-      case NUMBER:
+        }
+      case NUMBER:{
         term = jj_consume_token(NUMBER);
         break;
+        }
       default:
-        jj_la1[13] = jj_gen;
+        jj_la1[12] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case FUZZY_SLOP:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case FUZZY_SLOP:{
         fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                fuzzy=true;
+fuzzy=true;
         break;
+        }
       default:
-        jj_la1[14] = jj_gen;
+        jj_la1[13] = jj_gen;
         ;
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case FUZZY_SLOP:
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case FUZZY_SLOP:{
           fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                         fuzzy=true;
+fuzzy=true;
           break;
+          }
         default:
-          jj_la1[15] = jj_gen;
+          jj_la1[14] = jj_gen;
           ;
         }
         break;
+        }
       default:
-        jj_la1[16] = jj_gen;
+        jj_la1[15] = jj_gen;
         ;
       }
-       if (fuzzy) {
+if (fuzzy) {
            float fms = defaultMinSimilarity;
            try {
             fms = Float.parseFloat(fuzzySlop.image.substring(1));
@@ -555,75 +569,87 @@
          q = new RegexpQueryNode(field, re, 0, re.length());
        }
       break;
+      }
     case RANGEIN_START:
-    case RANGEEX_START:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_START:
+    case RANGEEX_START:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_START:{
         jj_consume_token(RANGEIN_START);
-                            startInc=true;
+startInc=true;
         break;
-      case RANGEEX_START:
+        }
+      case RANGEEX_START:{
         jj_consume_token(RANGEEX_START);
         break;
+        }
+      default:
+        jj_la1[16] = jj_gen;
+        jj_consume_token(-1);
+        throw new ParseException();
+      }
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
+        goop1 = jj_consume_token(RANGE_GOOP);
+        break;
+        }
+      case RANGE_QUOTED:{
+        goop1 = jj_consume_token(RANGE_QUOTED);
+        break;
+        }
+      case RANGE_TO:{
+        goop1 = jj_consume_token(RANGE_TO);
+        break;
+        }
       default:
         jj_la1[17] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
-        goop1 = jj_consume_token(RANGE_GOOP);
+      jj_consume_token(RANGE_TO);
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
+        goop2 = jj_consume_token(RANGE_GOOP);
         break;
-      case RANGE_QUOTED:
-        goop1 = jj_consume_token(RANGE_QUOTED);
+        }
+      case RANGE_QUOTED:{
+        goop2 = jj_consume_token(RANGE_QUOTED);
         break;
-      case RANGE_TO:
-        goop1 = jj_consume_token(RANGE_TO);
+        }
+      case RANGE_TO:{
+        goop2 = jj_consume_token(RANGE_TO);
         break;
+        }
       default:
         jj_la1[18] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      jj_consume_token(RANGE_TO);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
-        goop2 = jj_consume_token(RANGE_GOOP);
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_END:{
+        jj_consume_token(RANGEIN_END);
+endInc=true;
         break;
-      case RANGE_QUOTED:
-        goop2 = jj_consume_token(RANGE_QUOTED);
+        }
+      case RANGEEX_END:{
+        jj_consume_token(RANGEEX_END);
         break;
-      case RANGE_TO:
-        goop2 = jj_consume_token(RANGE_TO);
-        break;
+        }
       default:
         jj_la1[19] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_END:
-        jj_consume_token(RANGEIN_END);
-                          endInc=true;
-        break;
-      case RANGEEX_END:
-        jj_consume_token(RANGEEX_END);
-        break;
-      default:
-        jj_la1[20] = jj_gen;
-        jj_consume_token(-1);
-        throw new ParseException();
-      }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
-        jj_la1[21] = jj_gen;
+        jj_la1[20] = jj_gen;
         ;
       }
-          if (goop1.kind == RANGE_QUOTED) {
+if (goop1.kind == RANGE_QUOTED) {
             goop1.image = goop1.image.substring(1, goop1.image.length()-1);
           }
           if (goop2.kind == RANGE_QUOTED) {
@@ -636,27 +662,30 @@
                                    EscapeQuerySyntaxImpl.discardEscapeChar(goop2.image), goop2.beginColumn, goop2.endColumn);
           q = new TermRangeQueryNode(qLower, qUpper, startInc ? true : false, endInc ? true : false);
       break;
-    case QUOTED:
+      }
+    case QUOTED:{
       term = jj_consume_token(QUOTED);
-                      q = new QuotedFieldQueryNode(field, EscapeQuerySyntaxImpl.discardEscapeChar(term.image.substring(1, term.image.length()-1)), term.beginColumn + 1, term.endColumn - 1);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case FUZZY_SLOP:
+q = new QuotedFieldQueryNode(field, EscapeQuerySyntaxImpl.discardEscapeChar(term.image.substring(1, term.image.length()-1)), term.beginColumn + 1, term.endColumn - 1);
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case FUZZY_SLOP:{
         fuzzySlop = jj_consume_token(FUZZY_SLOP);
         break;
+        }
+      default:
+        jj_la1[21] = jj_gen;
+        ;
+      }
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
+        jj_consume_token(CARAT);
+        boost = jj_consume_token(NUMBER);
+        break;
+        }
       default:
         jj_la1[22] = jj_gen;
         ;
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
-        jj_consume_token(CARAT);
-        boost = jj_consume_token(NUMBER);
-        break;
-      default:
-        jj_la1[23] = jj_gen;
-        ;
-      }
-         int phraseSlop = 0;
+int phraseSlop = 0;
 
          if (fuzzySlop != null) {
            try {
@@ -670,12 +699,13 @@
            }
          }
       break;
+      }
     default:
-      jj_la1[24] = jj_gen;
+      jj_la1[23] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
-    if (boost != null) {
+if (boost != null) {
       float f = (float)1.0;
       try {
         f = Float.parseFloat(boost.image);
@@ -689,35 +719,187 @@
            */
       }
     }
-      {if (true) return q;}
+      {if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  private boolean jj_2_1(int xla) {
+  private boolean jj_2_1(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_1(); }
+    try { return (!jj_3_1()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(0, xla); }
   }
 
-  private boolean jj_2_2(int xla) {
+  private boolean jj_2_2(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_2(); }
+    try { return (!jj_3_2()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(1, xla); }
   }
 
-  private boolean jj_3R_12() {
+  private boolean jj_2_3(int xla)
+ {
+    jj_la = xla; jj_lastpos = jj_scanpos = token;
+    try { return (!jj_3_3()); }
+    catch(LookaheadSuccess ls) { return true; }
+    finally { jj_save(2, xla); }
+  }
+
+  private boolean jj_3R_26()
+ {
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_3_2()) {
+    jj_scanpos = xsp;
+    if (jj_3_3()) return true;
+    }
+    return false;
+  }
+
+  private boolean jj_3R_22()
+ {
+    if (jj_scan_token(OR)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_13()
+ {
+    if (jj_3R_21()) return true;
+    Token xsp;
+    while (true) {
+      xsp = jj_scanpos;
+      if (jj_3R_22()) { jj_scanpos = xsp; break; }
+    }
+    return false;
+  }
+
+  private boolean jj_3R_17()
+ {
     if (jj_scan_token(RANGEIN_START)) return true;
     return false;
   }
 
-  private boolean jj_3R_11() {
+  private boolean jj_3R_23()
+ {
+    if (jj_3R_25()) return true;
+    if (jj_3R_26()) return true;
+    return false;
+  }
+
+  private boolean jj_3R_11()
+ {
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_17()) {
+    jj_scanpos = xsp;
+    if (jj_scan_token(27)) return true;
+    }
+    xsp = jj_scanpos;
+    if (jj_scan_token(33)) {
+    jj_scanpos = xsp;
+    if (jj_scan_token(32)) {
+    jj_scanpos = xsp;
+    if (jj_scan_token(29)) return true;
+    }
+    }
+    if (jj_scan_token(RANGE_TO)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_30()
+ {
+    if (jj_scan_token(NOT)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_29()
+ {
+    if (jj_scan_token(MINUS)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_24()
+ {
+    if (jj_scan_token(AND)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_7()
+ {
+    if (jj_scan_token(LPAREN)) return true;
+    if (jj_3R_9()) return true;
+    if (jj_scan_token(RPAREN)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_28()
+ {
+    if (jj_scan_token(PLUS)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_27()
+ {
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_28()) {
+    jj_scanpos = xsp;
+    if (jj_3R_29()) {
+    jj_scanpos = xsp;
+    if (jj_3R_30()) return true;
+    }
+    }
+    return false;
+  }
+
+  private boolean jj_3R_21()
+ {
+    if (jj_3R_23()) return true;
+    Token xsp;
+    while (true) {
+      xsp = jj_scanpos;
+      if (jj_3R_24()) { jj_scanpos = xsp; break; }
+    }
+    return false;
+  }
+
+  private boolean jj_3R_19()
+ {
+    if (jj_scan_token(CARAT)) return true;
+    if (jj_scan_token(NUMBER)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_6()
+ {
+    if (jj_3R_8()) return true;
+    return false;
+  }
+
+  private boolean jj_3R_18()
+ {
+    if (jj_scan_token(FUZZY_SLOP)) return true;
+    return false;
+  }
+
+  private boolean jj_3R_25()
+ {
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_27()) jj_scanpos = xsp;
+    return false;
+  }
+
+  private boolean jj_3R_16()
+ {
     if (jj_scan_token(REGEXPTERM)) return true;
     return false;
   }
 
-  private boolean jj_3_1() {
+  private boolean jj_3_1()
+ {
     if (jj_scan_token(TERM)) return true;
     Token xsp;
     xsp = jj_scanpos;
@@ -728,40 +910,40 @@
     return false;
   }
 
-  private boolean jj_3R_8() {
+  private boolean jj_3R_20()
+ {
+    if (jj_scan_token(CARAT)) return true;
+    if (jj_scan_token(NUMBER)) return true;
+    return false;
+  }
+
+  private boolean jj_3_3()
+ {
     Token xsp;
     xsp = jj_scanpos;
-    if (jj_3R_12()) {
+    if (jj_3_1()) jj_scanpos = xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_6()) {
     jj_scanpos = xsp;
-    if (jj_scan_token(27)) return true;
+    if (jj_3R_7()) return true;
     }
     return false;
   }
 
-  private boolean jj_3R_10() {
+  private boolean jj_3R_15()
+ {
     if (jj_scan_token(TERM)) return true;
     return false;
   }
 
-  private boolean jj_3R_7() {
-    Token xsp;
-    xsp = jj_scanpos;
-    if (jj_3R_10()) {
-    jj_scanpos = xsp;
-    if (jj_3R_11()) {
-    jj_scanpos = xsp;
-    if (jj_scan_token(28)) return true;
-    }
-    }
+  private boolean jj_3R_14()
+ {
+    if (jj_3R_13()) return true;
     return false;
   }
 
-  private boolean jj_3R_9() {
-    if (jj_scan_token(QUOTED)) return true;
-    return false;
-  }
-
-  private boolean jj_3R_5() {
+  private boolean jj_3R_5()
+ {
     Token xsp;
     xsp = jj_scanpos;
     if (jj_scan_token(17)) {
@@ -785,31 +967,60 @@
     return false;
   }
 
-  private boolean jj_3R_4() {
+  private boolean jj_3R_4()
+ {
     Token xsp;
     xsp = jj_scanpos;
     if (jj_scan_token(15)) {
     jj_scanpos = xsp;
     if (jj_scan_token(16)) return true;
     }
-    if (jj_3R_6()) return true;
+    if (jj_3R_8()) return true;
     return false;
   }
 
-  private boolean jj_3R_6() {
+  private boolean jj_3R_10()
+ {
     Token xsp;
     xsp = jj_scanpos;
-    if (jj_3R_7()) {
+    if (jj_3R_15()) {
     jj_scanpos = xsp;
-    if (jj_3R_8()) {
+    if (jj_3R_16()) {
     jj_scanpos = xsp;
-    if (jj_3R_9()) return true;
+    if (jj_scan_token(28)) return true;
     }
     }
+    xsp = jj_scanpos;
+    if (jj_3R_18()) jj_scanpos = xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_19()) jj_scanpos = xsp;
+    return false;
+  }
+
+  private boolean jj_3R_12()
+ {
+    if (jj_scan_token(QUOTED)) return true;
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_scan_token(24)) jj_scanpos = xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_20()) jj_scanpos = xsp;
+    return false;
+  }
+
+  private boolean jj_3R_9()
+ {
+    if (jj_3R_13()) return true;
+    Token xsp;
+    while (true) {
+      xsp = jj_scanpos;
+      if (jj_3R_14()) { jj_scanpos = xsp; break; }
+    }
     return false;
   }
 
-  private boolean jj_3_2() {
+  private boolean jj_3_2()
+ {
     if (jj_scan_token(TERM)) return true;
     Token xsp;
     xsp = jj_scanpos;
@@ -820,6 +1031,20 @@
     return false;
   }
 
+  private boolean jj_3R_8()
+ {
+    Token xsp;
+    xsp = jj_scanpos;
+    if (jj_3R_10()) {
+    jj_scanpos = xsp;
+    if (jj_3R_11()) {
+    jj_scanpos = xsp;
+    if (jj_3R_12()) return true;
+    }
+    }
+    return false;
+  }
+
   /** Generated Token Manager. */
   public StandardSyntaxParserTokenManager token_source;
   /** Current token. */
@@ -830,135 +1055,136 @@
   private Token jj_scanpos, jj_lastpos;
   private int jj_la;
   private int jj_gen;
-  final private int[] jj_la1 = new int[25];
+  final private int[] jj_la1 = new int[24];
   static private int[] jj_la1_0;
   static private int[] jj_la1_1;
   static {
-      jj_la1_init_0();
-      jj_la1_init_1();
-   }
-   private static void jj_la1_init_0() {
-      jj_la1_0 = new int[] {0x1c00,0x1c00,0x1ec03c00,0x200,0x100,0x18000,0x1e0000,0x10c00000,0x1f8000,0x18000,0x200000,0x1ec02000,0x1ec02000,0x12800000,0x1000000,0x1000000,0x200000,0xc000000,0x20000000,0x20000000,0xc0000000,0x200000,0x1000000,0x200000,0x1ec00000,};
-   }
-   private static void jj_la1_init_1() {
-      jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x3,0x3,0x0,0x0,0x0,0x0,0x0,};
-   }
-  final private JJCalls[] jj_2_rtns = new JJCalls[2];
+       jj_la1_init_0();
+       jj_la1_init_1();
+    }
+    private static void jj_la1_init_0() {
+       jj_la1_0 = new int[] {0x1c00,0x1c00,0x1ec03c00,0x200,0x100,0x18000,0x1e0000,0x10c00000,0x1f8000,0x18000,0x200000,0x1ec02000,0x12800000,0x1000000,0x1000000,0x200000,0xc000000,0x20000000,0x20000000,0xc0000000,0x200000,0x1000000,0x200000,0x1ec00000,};
+    }
+    private static void jj_la1_init_1() {
+       jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x3,0x3,0x0,0x0,0x0,0x0,0x0,};
+    }
+  final private JJCalls[] jj_2_rtns = new JJCalls[3];
   private boolean jj_rescan = false;
   private int jj_gc = 0;
 
   /** Constructor with user supplied CharStream. */
   public StandardSyntaxParser(CharStream stream) {
-    token_source = new StandardSyntaxParserTokenManager(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = new StandardSyntaxParserTokenManager(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 24; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(CharStream stream) {
-    token_source.ReInit(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source.ReInit(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 24; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Constructor with generated Token Manager. */
   public StandardSyntaxParser(StandardSyntaxParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 24; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(StandardSyntaxParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 25; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 24; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      if (++jj_gc > 100) {
-        jj_gc = 0;
-        for (int i = 0; i < jj_2_rtns.length; i++) {
-          JJCalls c = jj_2_rtns[i];
-          while (c != null) {
-            if (c.gen < jj_gen) c.first = null;
-            c = c.next;
-          }
-        }
-      }
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
+     Token oldToken;
+     if ((oldToken = token).next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     if (token.kind == kind) {
+       jj_gen++;
+       if (++jj_gc > 100) {
+         jj_gc = 0;
+         for (int i = 0; i < jj_2_rtns.length; i++) {
+           JJCalls c = jj_2_rtns[i];
+           while (c != null) {
+             if (c.gen < jj_gen) c.first = null;
+             c = c.next;
+           }
+         }
+       }
+       return token;
+     }
+     token = oldToken;
+     jj_kind = kind;
+     throw generateParseException();
   }
 
+  @SuppressWarnings("serial")
   static private final class LookaheadSuccess extends java.lang.Error { }
   final private LookaheadSuccess jj_ls = new LookaheadSuccess();
   private boolean jj_scan_token(int kind) {
-    if (jj_scanpos == jj_lastpos) {
-      jj_la--;
-      if (jj_scanpos.next == null) {
-        jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
-      } else {
-        jj_lastpos = jj_scanpos = jj_scanpos.next;
-      }
-    } else {
-      jj_scanpos = jj_scanpos.next;
-    }
-    if (jj_rescan) {
-      int i = 0; Token tok = token;
-      while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
-      if (tok != null) jj_add_error_token(kind, i);
-    }
-    if (jj_scanpos.kind != kind) return true;
-    if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
-    return false;
+     if (jj_scanpos == jj_lastpos) {
+       jj_la--;
+       if (jj_scanpos.next == null) {
+         jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
+       } else {
+         jj_lastpos = jj_scanpos = jj_scanpos.next;
+       }
+     } else {
+       jj_scanpos = jj_scanpos.next;
+     }
+     if (jj_rescan) {
+       int i = 0; Token tok = token;
+       while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
+       if (tok != null) jj_add_error_token(kind, i);
+     }
+     if (jj_scanpos.kind != kind) return true;
+     if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
+     return false;
   }
 
 
 /** Get the next Token. */
   final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
+     if (token.next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     jj_gen++;
+     return token;
   }
 
 /** Get the specific Token. */
   final public Token getToken(int index) {
-    Token t = token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
+     Token t = token;
+     for (int i = 0; i < index; i++) {
+       if (t.next != null) t = t.next;
+       else t = t.next = token_source.getNextToken();
+     }
+     return t;
   }
 
-  private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
+  private int jj_ntk_f() {
+     if ((jj_nt=token.next) == null)
+       return (jj_ntk = (token.next=token_source.getNextToken()).kind);
+     else
+       return (jj_ntk = jj_nt.kind);
   }
 
   private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
@@ -968,65 +1194,86 @@
   private int jj_endpos;
 
   private void jj_add_error_token(int kind, int pos) {
-    if (pos >= 100) return;
-    if (pos == jj_endpos + 1) {
-      jj_lasttokens[jj_endpos++] = kind;
-    } else if (jj_endpos != 0) {
-      jj_expentry = new int[jj_endpos];
-      for (int i = 0; i < jj_endpos; i++) {
-        jj_expentry[i] = jj_lasttokens[i];
-      }
-      jj_entries_loop: for (java.util.Iterator<?> it = jj_expentries.iterator(); it.hasNext();) {
-        int[] oldentry = (int[])(it.next());
-        if (oldentry.length == jj_expentry.length) {
-          for (int i = 0; i < jj_expentry.length; i++) {
-            if (oldentry[i] != jj_expentry[i]) {
-              continue jj_entries_loop;
-            }
-          }
-          jj_expentries.add(jj_expentry);
-          break jj_entries_loop;
-        }
-      }
-      if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
-    }
+     if (pos >= 100) {
+        return;
+     }
+
+     if (pos == jj_endpos + 1) {
+       jj_lasttokens[jj_endpos++] = kind;
+     } else if (jj_endpos != 0) {
+       jj_expentry = new int[jj_endpos];
+
+       for (int i = 0; i < jj_endpos; i++) {
+         jj_expentry[i] = jj_lasttokens[i];
+       }
+
+       for (int[] oldentry : jj_expentries) {
+         if (oldentry.length == jj_expentry.length) {
+           boolean isMatched = true;
+
+           for (int i = 0; i < jj_expentry.length; i++) {
+             if (oldentry[i] != jj_expentry[i]) {
+               isMatched = false;
+               break;
+             }
+
+           }
+           if (isMatched) {
+             jj_expentries.add(jj_expentry);
+             break;
+           }
+         }
+       }
+
+       if (pos != 0) {
+         jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+       }
+     }
   }
 
   /** Generate ParseException. */
   public ParseException generateParseException() {
-    jj_expentries.clear();
-    boolean[] la1tokens = new boolean[34];
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 25; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-          if ((jj_la1_1[i] & (1<<j)) != 0) {
-            la1tokens[32+j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 34; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.add(jj_expentry);
-      }
-    }
-    jj_endpos = 0;
-    jj_rescan_token();
-    jj_add_error_token(0, 0);
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.get(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
+     jj_expentries.clear();
+     boolean[] la1tokens = new boolean[34];
+     if (jj_kind >= 0) {
+       la1tokens[jj_kind] = true;
+       jj_kind = -1;
+     }
+     for (int i = 0; i < 24; i++) {
+       if (jj_la1[i] == jj_gen) {
+         for (int j = 0; j < 32; j++) {
+           if ((jj_la1_0[i] & (1<<j)) != 0) {
+             la1tokens[j] = true;
+           }
+           if ((jj_la1_1[i] & (1<<j)) != 0) {
+             la1tokens[32+j] = true;
+           }
+         }
+       }
+     }
+     for (int i = 0; i < 34; i++) {
+       if (la1tokens[i]) {
+         jj_expentry = new int[1];
+         jj_expentry[0] = i;
+         jj_expentries.add(jj_expentry);
+       }
+     }
+     jj_endpos = 0;
+     jj_rescan_token();
+     jj_add_error_token(0, 0);
+     int[][] exptokseq = new int[jj_expentries.size()][];
+     for (int i = 0; i < jj_expentries.size(); i++) {
+       exptokseq[i] = jj_expentries.get(i);
+     }
+     return new ParseException(token, exptokseq, tokenImage);
+  }
+
+  private int trace_indent = 0;
+  private boolean trace_enabled;
+
+/** Trace enabled. */
+  final public boolean trace_enabled() {
+     return trace_enabled;
   }
 
   /** Enable tracing. */
@@ -1038,39 +1285,45 @@
   }
 
   private void jj_rescan_token() {
-    jj_rescan = true;
-    for (int i = 0; i < 2; i++) {
-    try {
-      JJCalls p = jj_2_rtns[i];
-      do {
-        if (p.gen > jj_gen) {
-          jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
-          switch (i) {
-            case 0: jj_3_1(); break;
-            case 1: jj_3_2(); break;
-          }
-        }
-        p = p.next;
-      } while (p != null);
-      } catch(LookaheadSuccess ls) { }
-    }
-    jj_rescan = false;
+     jj_rescan = true;
+     for (int i = 0; i < 3; i++) {
+       try {
+         JJCalls p = jj_2_rtns[i];
+
+         do {
+           if (p.gen > jj_gen) {
+             jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+             switch (i) {
+               case 0: jj_3_1(); break;
+               case 1: jj_3_2(); break;
+               case 2: jj_3_3(); break;
+             }
+           }
+           p = p.next;
+         } while (p != null);
+
+         } catch(LookaheadSuccess ls) { }
+     }
+     jj_rescan = false;
   }
 
   private void jj_save(int index, int xla) {
-    JJCalls p = jj_2_rtns[index];
-    while (p.gen > jj_gen) {
-      if (p.next == null) { p = p.next = new JJCalls(); break; }
-      p = p.next;
-    }
-    p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+     JJCalls p = jj_2_rtns[index];
+     while (p.gen > jj_gen) {
+       if (p.next == null) { p = p.next = new JJCalls(); break; }
+       p = p.next;
+     }
+
+     p.gen = jj_gen + xla - jj_la; 
+     p.first = token;
+     p.arg = xla;
   }
 
   static final class JJCalls {
-    int gen;
-    Token first;
-    int arg;
-    JJCalls next;
+     int gen;
+     Token first;
+     int arg;
+     JJCalls next;
   }
 
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj
index 0a60490..69d1084 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.jj
@@ -205,7 +205,7 @@
           if (first instanceof ModifierQueryNode) {
             ModifierQueryNode m = (ModifierQueryNode) first;
             if (m.getModifier() == ModifierQueryNode.Modifier.MOD_NOT) {
-              return new BooleanQueryNode(Arrays.<QueryNode> asList(m));
+              return new BooleanQueryNode(Arrays.asList(m));
             }
           }
           return first;
@@ -325,8 +325,7 @@
 }
 {
 (
-  LOOKAHEAD(3)
-  fieldToken=<TERM> (
+  LOOKAHEAD(3) fieldToken=<TERM> (
     ( <OP_COLON> | <OP_EQUAL> ) {field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);} q=Term(field)
   | ( operator=<OP_LESSTHAN> | operator=<OP_LESSTHANEQ> | operator=<OP_MORETHAN> | operator=<OP_MORETHANEQ> ) {field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);}( term=<TERM> | term=<QUOTED> | term=<NUMBER> )
     {
@@ -377,14 +376,14 @@
         q = new TermRangeQueryNode(qLower, qUpper, lowerInclusive, upperInclusive);
     }
   )
-| [
-    LOOKAHEAD(2)
+  | LOOKAHEAD(3) [
+    LOOKAHEAD(3)
     fieldToken=<TERM>
     ( <OP_COLON> | <OP_EQUAL> ) {field=EscapeQuerySyntaxImpl.discardEscapeChar(fieldToken.image);}
   ]
   (
-   q=Term(field)
-   | <LPAREN> q=Query(field) <RPAREN> (<CARAT> boost=<NUMBER>)? {group=true;}
+   (q=Term(field))
+   | (<LPAREN> q=Query(field) <RPAREN> (<CARAT> boost=<NUMBER>)? {group=true;})
   )
 )
     {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
index 1fdaa48..bbfa11f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
@@ -1,3 +1,4 @@
+/* StandardSyntaxParserTokenManager.java */
 /* Generated By:JavaCC: Do not edit this line. StandardSyntaxParserTokenManager.java */
 package org.apache.lucene.queryparser.flexible.standard.parser;
 /*
@@ -17,21 +18,43 @@
  * limitations under the License.
  */
 
-/** Token Manager. */
-public class StandardSyntaxParserTokenManager implements StandardSyntaxParserConstants
-{
 
-  
-private final int jjStopStringLiteralDfa_2(int pos, long active0)
-{
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/** Token Manager. */
+public class StandardSyntaxParserTokenManager implements StandardSyntaxParserConstants {
+
+  /** Debug output. */
+  // (debugStream omitted).
+  /** Set debug output. */
+  // (setDebugStream omitted).
+private final int jjStopStringLiteralDfa_2(int pos, long active0){
    switch (pos)
    {
       default :
          return -1;
    }
 }
-private final int jjStartNfa_2(int pos, long active0)
-{
+private final int jjStartNfa_2(int pos, long active0){
    return jjMoveNfa_2(jjStopStringLiteralDfa_2(pos, active0), pos + 1);
 }
 private int jjStopAtPos(int pos, int kind)
@@ -40,8 +63,7 @@
    jjmatchedPos = pos;
    return pos + 1;
 }
-private int jjMoveStringLiteralDfa0_2()
-{
+private int jjMoveStringLiteralDfa0_2(){
    switch(curChar)
    {
       case 40:
@@ -72,8 +94,7 @@
          return jjMoveNfa_2(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_2(long active0)
-{
+private int jjMoveStringLiteralDfa1_2(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_2(0, active0);
@@ -127,7 +148,7 @@
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(20, 21);
+                     { jjCheckNAddTwoStates(20, 21); }
                   }
                   else if ((0x100002600L & l) != 0L)
                   {
@@ -135,9 +156,9 @@
                         kind = 7;
                   }
                   else if (curChar == 47)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   else if (curChar == 34)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   else if (curChar == 33)
                   {
                      if (kind > 10)
@@ -160,14 +181,14 @@
                   break;
                case 14:
                   if (curChar == 34)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 15:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 17:
-                  jjCheckNAddStates(3, 5);
+                  { jjCheckNAddStates(3, 5); }
                   break;
                case 18:
                   if (curChar == 34 && kind > 22)
@@ -178,46 +199,46 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 20:
                   if ((0x8bff7cf8ffffd9ffL & l) == 0L)
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 22:
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 25:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 24)
                      kind = 24;
-                  jjAddStates(6, 7);
+                  { jjAddStates(6, 7); }
                   break;
                case 26:
                   if (curChar == 46)
-                     jjCheckNAdd(27);
+                     { jjCheckNAdd(27); }
                   break;
                case 27:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 24)
                      kind = 24;
-                  jjCheckNAdd(27);
+                  { jjCheckNAdd(27); }
                   break;
                case 28:
                case 30:
                   if (curChar == 47)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 29:
                   if ((0xffff7fffffffffffL & l) != 0L)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 32:
                   if (curChar == 47 && kind > 25)
@@ -239,7 +260,7 @@
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(20, 21);
+                     { jjCheckNAddTwoStates(20, 21); }
                   }
                   else if (curChar == 126)
                   {
@@ -248,7 +269,7 @@
                      jjstateSet[jjnewStateCnt++] = 25;
                   }
                   else if (curChar == 92)
-                     jjCheckNAdd(22);
+                     { jjCheckNAdd(22); }
                   if (curChar == 78)
                      jjstateSet[jjnewStateCnt++] = 11;
                   else if (curChar == 124)
@@ -300,14 +321,14 @@
                   break;
                case 15:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 16:
                   if (curChar == 92)
                      jjstateSet[jjnewStateCnt++] = 17;
                   break;
                case 17:
-                  jjCheckNAddStates(3, 5);
+                  { jjCheckNAddStates(3, 5); }
                   break;
                case 19:
                case 20:
@@ -315,20 +336,20 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 21:
                   if (curChar == 92)
-                     jjCheckNAddTwoStates(22, 22);
+                     { jjCheckNAddTwoStates(22, 22); }
                   break;
                case 22:
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 23:
                   if (curChar == 92)
-                     jjCheckNAdd(22);
+                     { jjCheckNAdd(22); }
                   break;
                case 24:
                   if (curChar != 126)
@@ -338,7 +359,7 @@
                   jjstateSet[jjnewStateCnt++] = 25;
                   break;
                case 29:
-                  jjAddStates(0, 2);
+                  { jjAddStates(0, 2); }
                   break;
                case 31:
                   if (curChar == 92)
@@ -350,7 +371,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -369,13 +390,13 @@
                   {
                      if (kind > 23)
                         kind = 23;
-                     jjCheckNAddTwoStates(20, 21);
+                     { jjCheckNAddTwoStates(20, 21); }
                   }
                   break;
                case 15:
                case 17:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(3, 5);
+                     { jjCheckNAddStates(3, 5); }
                   break;
                case 19:
                case 20:
@@ -383,20 +404,20 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 22:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAddTwoStates(20, 21);
+                  { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 29:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjAddStates(0, 2);
+                     { jjAddStates(0, 2); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -440,18 +461,18 @@
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjAddStates(8, 9);
+                  { jjAddStates(8, 9); }
                   break;
                case 1:
                   if (curChar == 46)
-                     jjCheckNAdd(2);
+                     { jjCheckNAdd(2); }
                   break;
                case 2:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAdd(2);
+                  { jjCheckNAdd(2); }
                   break;
                default : break;
             }
@@ -470,7 +491,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -479,7 +500,7 @@
          {
             switch(jjstateSet[--i])
             {
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -496,8 +517,7 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
-private final int jjStopStringLiteralDfa_1(int pos, long active0)
-{
+private final int jjStopStringLiteralDfa_1(int pos, long active0){
    switch (pos)
    {
       case 0:
@@ -511,12 +531,10 @@
          return -1;
    }
 }
-private final int jjStartNfa_1(int pos, long active0)
-{
+private final int jjStartNfa_1(int pos, long active0){
    return jjMoveNfa_1(jjStopStringLiteralDfa_1(pos, active0), pos + 1);
 }
-private int jjMoveStringLiteralDfa0_1()
-{
+private int jjMoveStringLiteralDfa0_1(){
    switch(curChar)
    {
       case 84:
@@ -529,8 +547,7 @@
          return jjMoveNfa_1(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_1(long active0)
-{
+private int jjMoveStringLiteralDfa1_1(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_1(0, active0);
@@ -578,7 +595,7 @@
                   {
                      if (kind > 33)
                         kind = 33;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   if ((0x100002600L & l) != 0L)
                   {
@@ -586,19 +603,19 @@
                         kind = 7;
                   }
                   else if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 1:
                   if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 2:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(10, 12);
+                     { jjCheckNAddStates(10, 12); }
                   break;
                case 3:
                   if (curChar == 34)
-                     jjCheckNAddStates(10, 12);
+                     { jjCheckNAddStates(10, 12); }
                   break;
                case 5:
                   if (curChar == 34 && kind > 32)
@@ -609,7 +626,7 @@
                      break;
                   if (kind > 33)
                      kind = 33;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                default : break;
             }
@@ -628,10 +645,10 @@
                      break;
                   if (kind > 33)
                      kind = 33;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                case 2:
-                  jjAddStates(10, 12);
+                  { jjAddStates(10, 12); }
                   break;
                case 4:
                   if (curChar == 92)
@@ -643,7 +660,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -662,21 +679,21 @@
                   {
                      if (kind > 33)
                         kind = 33;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   break;
                case 2:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjAddStates(10, 12);
+                     { jjAddStates(10, 12); }
                   break;
                case 6:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 33)
                      kind = 33;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -693,6 +710,37 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
+
+/** Token literal values. */
+public static final String[] jjstrLiteralImages = {
+"", null, null, null, null, null, null, null, null, null, null, "\53", "\55", 
+"\50", "\51", "\72", "\75", "\74", "\74\75", "\76", "\76\75", "\136", null, null, 
+null, null, "\133", "\173", null, "\124\117", "\135", "\175", null, null, };
+protected Token jjFillToken()
+{
+   final Token t;
+   final String curTokenImage;
+   final int beginLine;
+   final int endLine;
+   final int beginColumn;
+   final int endColumn;
+   String im = jjstrLiteralImages[jjmatchedKind];
+   curTokenImage = (im == null) ? input_stream.GetImage() : im;
+   beginLine = input_stream.getBeginLine();
+   beginColumn = input_stream.getBeginColumn();
+   endLine = input_stream.getEndLine();
+   endColumn = input_stream.getEndColumn();
+   t = Token.newToken(jjmatchedKind);
+   t.kind = jjmatchedKind;
+   t.image = curTokenImage;
+
+   t.beginLine = beginLine;
+   t.endLine = endLine;
+   t.beginColumn = beginColumn;
+   t.endColumn = endColumn;
+
+   return t;
+}
 static final int[] jjnextStates = {
    29, 31, 32, 15, 16, 18, 25, 26, 0, 1, 2, 4, 5, 
 };
@@ -733,101 +781,6 @@
    }
 }
 
-/** Token literal values. */
-public static final String[] jjstrLiteralImages = {
-"", null, null, null, null, null, null, null, null, null, null, "\53", "\55", 
-"\50", "\51", "\72", "\75", "\74", "\74\75", "\76", "\76\75", "\136", null, null, 
-null, null, "\133", "\173", null, "\124\117", "\135", "\175", null, null, };
-
-/** Lexer state names. */
-public static final String[] lexStateNames = {
-   "Boost",
-   "Range",
-   "DEFAULT",
-};
-
-/** Lex State array. */
-public static final int[] jjnewLexState = {
-   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, 
-   -1, 1, 1, 2, -1, 2, 2, -1, -1, 
-};
-static final long[] jjtoToken = {
-   0x3ffffff01L, 
-};
-static final long[] jjtoSkip = {
-   0x80L, 
-};
-protected CharStream input_stream;
-private final int[] jjrounds = new int[33];
-private final int[] jjstateSet = new int[66];
-protected char curChar;
-/** Constructor. */
-public StandardSyntaxParserTokenManager(CharStream stream){
-   input_stream = stream;
-}
-
-/** Constructor. */
-public StandardSyntaxParserTokenManager(CharStream stream, int lexState){
-   this(stream);
-   SwitchTo(lexState);
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream)
-{
-   jjmatchedPos = jjnewStateCnt = 0;
-   curLexState = defaultLexState;
-   input_stream = stream;
-   ReInitRounds();
-}
-private void ReInitRounds()
-{
-   int i;
-   jjround = 0x80000001;
-   for (i = 33; i-- > 0;)
-      jjrounds[i] = 0x80000000;
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream, int lexState)
-{
-   ReInit(stream);
-   SwitchTo(lexState);
-}
-
-/** Switch to specified lex state. */
-public void SwitchTo(int lexState)
-{
-   if (lexState >= 3 || lexState < 0)
-      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
-   else
-      curLexState = lexState;
-}
-
-protected Token jjFillToken()
-{
-   final Token t;
-   final String curTokenImage;
-   final int beginLine;
-   final int endLine;
-   final int beginColumn;
-   final int endColumn;
-   String im = jjstrLiteralImages[jjmatchedKind];
-   curTokenImage = (im == null) ? input_stream.GetImage() : im;
-   beginLine = input_stream.getBeginLine();
-   beginColumn = input_stream.getBeginColumn();
-   endLine = input_stream.getEndLine();
-   endColumn = input_stream.getEndColumn();
-   t = Token.newToken(jjmatchedKind, curTokenImage);
-
-   t.beginLine = beginLine;
-   t.endLine = endLine;
-   t.beginColumn = beginColumn;
-   t.endColumn = endColumn;
-
-   return t;
-}
-
 int curLexState = 2;
 int defaultLexState = 2;
 int jjnewStateCnt;
@@ -848,9 +801,10 @@
    {
       curChar = input_stream.BeginToken();
    }
-   catch(java.io.IOException e)
+   catch(Exception e)
    {
       jjmatchedKind = 0;
+      jjmatchedPos = -1;
       matchedToken = jjFillToken();
       return matchedToken;
    }
@@ -914,6 +868,31 @@
   }
 }
 
+void SkipLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void MoreLexicalActions()
+{
+   jjimageLen += (lengthOfMatch = jjmatchedPos + 1);
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void TokenLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
 private void jjCheckNAdd(int state)
 {
    if (jjrounds[state] != jjround)
@@ -941,4 +920,90 @@
    } while (start++ != end);
 }
 
+    /** Constructor. */
+    public StandardSyntaxParserTokenManager(CharStream stream){
+
+
+    input_stream = stream;
+  }
+
+  /** Constructor. */
+  public StandardSyntaxParserTokenManager (CharStream stream, int lexState){
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Reinitialise parser. */
+  
+  public void ReInit(CharStream stream)
+  {
+
+
+    jjmatchedPos =
+    jjnewStateCnt =
+    0;
+    curLexState = defaultLexState;
+    input_stream = stream;
+    ReInitRounds();
+  }
+
+  private void ReInitRounds()
+  {
+    int i;
+    jjround = 0x80000001;
+    for (i = 33; i-- > 0;)
+      jjrounds[i] = 0x80000000;
+  }
+
+  /** Reinitialise parser. */
+  public void ReInit(CharStream stream, int lexState)
+  
+  {
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Switch to specified lex state. */
+  public void SwitchTo(int lexState)
+  {
+    if (lexState >= 3 || lexState < 0)
+      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+    else
+      curLexState = lexState;
+  }
+
+
+/** Lexer state names. */
+public static final String[] lexStateNames = {
+   "Boost",
+   "Range",
+   "DEFAULT",
+};
+
+/** Lex State array. */
+public static final int[] jjnewLexState = {
+   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, 
+   -1, 1, 1, 2, -1, 2, 2, -1, -1, 
+};
+static final long[] jjtoToken = {
+   0x3ffffff01L, 
+};
+static final long[] jjtoSkip = {
+   0x80L, 
+};
+static final long[] jjtoSpecial = {
+   0x0L, 
+};
+static final long[] jjtoMore = {
+   0x0L, 
+};
+    protected CharStream  input_stream;
+
+    private final int[] jjrounds = new int[33];
+    private final int[] jjstateSet = new int[2 * 33];
+    private final StringBuilder jjimage = new StringBuilder();
+    private StringBuilder image = jjimage;
+    private int jjimageLen;
+    private int lengthOfMatch;
+    protected int curChar;
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java
index 95e66bb..55c47b3 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. Token.java Version 5.0 */
-/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
+/* Generated By:JavaCC: Do not edit this line. Token.java Version 7.0 */
+/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.flexible.standard.parser;
 
 /**
@@ -97,6 +97,7 @@
   /**
    * Returns the image.
    */
+  @Override
   public String toString()
   {
     return image;
@@ -128,4 +129,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=ea8b1e55950603be28e2f63dcd544ab4 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java
index e24a62b..5109a03 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 7.0 */
 /* JavaCCOptions: */
 package org.apache.lucene.queryparser.flexible.standard.parser;
 
@@ -20,22 +20,22 @@
   /**
    * Lexical error occurred.
    */
-  static final int LEXICAL_ERROR = 0;
+  public static final int LEXICAL_ERROR = 0;
 
   /**
    * An attempt was made to create a second instance of a static token manager.
    */
-  static final int STATIC_LEXER_ERROR = 1;
+  public static final int STATIC_LEXER_ERROR = 1;
 
   /**
    * Tried to change to an invalid lexical state.
    */
-  static final int INVALID_LEXICAL_STATE = 2;
+  public static final int INVALID_LEXICAL_STATE = 2;
 
   /**
    * Detected (and bailed out of) an infinite loop in the token manager.
    */
-  static final int LOOP_DETECTED = 3;
+  public static final int LOOP_DETECTED = 3;
 
   /**
    * Indicates the reason why the exception is thrown. It will have
@@ -53,8 +53,6 @@
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
       {
-        case 0 :
-          continue;
         case '\b':
           retval.append("\\b");
           continue;
@@ -104,11 +102,12 @@
    *    curchar     : the offending character
    * Note: You can customize the lexical error message by modifying this method.
    */
-  protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
+  protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) {
+    char curChar1 = (char)curChar;
     return("Lexical error at line " +
           errorLine + ", column " +
           errorColumn + ".  Encountered: " +
-          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
+          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + curChar + "), ") +
           "after : \"" + addEscapes(errorAfter) + "\"");
   }
 
@@ -121,6 +120,7 @@
    *
    * from this method for such cases in the release version of your parser.
    */
+  @Override
   public String getMessage() {
     return super.getMessage();
   }
@@ -140,8 +140,8 @@
   }
 
   /** Full Constructor. */
-  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
-    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
+  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) {
+    this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=be88283d82a985d82a34dda46bcf42d5 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java
index ba060f8..4481de6 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 7.0 */
 /* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.surround.parser;
 
@@ -111,5 +111,10 @@
    */
   void Done();
 
+
+  void setTabSize(int i);
+  int getTabSize();
+  boolean getTrackLineColumn();
+  void setTrackLineColumn(boolean trackLineColumn);
 }
-/* JavaCC - OriginalChecksum=242ae59b965491e225a44534cbc73b42 (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java
index ddd2b73..c49548b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/FastCharStream.java
@@ -137,4 +137,24 @@
   public final int getBeginLine() {
     return 1;
   }
+
+  @Override
+  public void setTabSize(int i) {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public int getTabSize() {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public boolean getTrackLineColumn() {
+    return false;
+  }
+
+  @Override
+  public void setTrackLineColumn(boolean trackLineColumn) {
+    throw new RuntimeException("Line/Column tracking not implemented.");
+  }
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java
index d49bc79..731cc92 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 5.0 */
-/* JavaCCOptions:KEEP_LINE_COL=null */
+/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */
+/* JavaCCOptions:KEEP_LINE_COLUMN=true */
 package org.apache.lucene.queryparser.surround.parser;
 
 /**
@@ -21,6 +21,11 @@
   private static final long serialVersionUID = 1L;
 
   /**
+   * The end of line string for this machine.
+   */
+  protected static String EOL = System.getProperty("line.separator", "\n");
+
+  /**
    * This constructor is used by the method "generateParseException"
    * in the generated parser.  Calling this constructor generates
    * a new object of this type with the fields "currentToken",
@@ -88,7 +93,7 @@
   private static String initialise(Token currentToken,
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
-    String eol = System.getProperty("line.separator", "\n");
+
     StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
@@ -101,7 +106,7 @@
       if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
         expected.append("...");
       }
-      expected.append(eol).append("    ");
+      expected.append(EOL).append("    ");
     }
     String retval = "Encountered \"";
     Token tok = currentToken.next;
@@ -118,20 +123,23 @@
       tok = tok.next;
     }
     retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
-    retval += "." + eol;
-    if (expectedTokenSequences.length == 1) {
-      retval += "Was expecting:" + eol + "    ";
+    retval += "." + EOL;
+    
+    
+    if (expectedTokenSequences.length == 0) {
+        // Nothing to add here
     } else {
-      retval += "Was expecting one of:" + eol + "    ";
+        if (expectedTokenSequences.length == 1) {
+          retval += "Was expecting:" + EOL + "    ";
+        } else {
+          retval += "Was expecting one of:" + EOL + "    ";
+        }
+        retval += expected.toString();
     }
-    retval += expected.toString();
+    
     return retval;
   }
 
-  /**
-   * The end of line string for this machine.
-   */
-  protected String eol = System.getProperty("line.separator", "\n");
 
   /**
    * Used to convert raw characters to their escaped version
@@ -144,8 +152,6 @@
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
         {
-           case 0 :
-              continue;
            case '\b':
               retval.append("\\b");
               continue;
@@ -184,4 +190,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=bd8163f41bf2fd1bb00f025fce3dcaaf (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
index 584a82d..34397d2 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
@@ -1,3 +1,4 @@
+/* QueryParser.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParser.java */
 package org.apache.lucene.queryparser.surround.parser;
 
@@ -6,6 +7,8 @@
 import java.io.StringReader;
 
 
+
+
 import org.apache.lucene.queryparser.surround.query.SrndQuery;
 import org.apache.lucene.queryparser.surround.query.FieldsQuery;
 import org.apache.lucene.queryparser.surround.query.OrQuery;
@@ -149,25 +152,22 @@
     return new SrndTruncQuery(truncated, TRUNCATOR, ANY_CHAR);
   }
 
-  final public SrndQuery TopSrndQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery TopSrndQuery() throws ParseException {SrndQuery q;
     q = FieldsQuery();
     jj_consume_token(0);
-   {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery FieldsQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery FieldsQuery() throws ParseException {SrndQuery q;
   ArrayList<String> fieldNames;
     fieldNames = OptionalFields();
     q = OrQuery();
-   {if (true) return (fieldNames == null) ? q : getFieldsQuery(q, fieldNames);}
+{if ("" != null) return (fieldNames == null) ? q : getFieldsQuery(q, fieldNames);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public ArrayList<String> OptionalFields() throws ParseException {
-  Token fieldName;
+  final public ArrayList<String> OptionalFields() throws ParseException {Token fieldName;
   ArrayList<String> fieldNames = null;
     label_1:
     while (true) {
@@ -179,304 +179,313 @@
       // to the colon
           fieldName = jj_consume_token(TERM);
       jj_consume_token(COLON);
-      if (fieldNames == null) {
+if (fieldNames == null) {
         fieldNames = new ArrayList<String>();
       }
       fieldNames.add(fieldName.image);
     }
-   {if (true) return fieldNames;}
+{if ("" != null) return fieldNames;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery OrQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery OrQuery() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries = null;
   Token oprt = null;
     q = AndQuery();
     label_2:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case OR:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case OR:{
         ;
         break;
+        }
       default:
         jj_la1[0] = jj_gen;
         break label_2;
       }
       oprt = jj_consume_token(OR);
-                  /* keep only last used operator */
+/* keep only last used operator */
       if (queries == null) {
         queries = new ArrayList<SrndQuery>();
         queries.add(q);
       }
       q = AndQuery();
-      queries.add(q);
+queries.add(q);
     }
-   {if (true) return (queries == null) ? q : getOrQuery(queries, true /* infix */, oprt);}
+{if ("" != null) return (queries == null) ? q : getOrQuery(queries, true /* infix */, oprt);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery AndQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery AndQuery() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries = null;
   Token oprt = null;
     q = NotQuery();
     label_3:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case AND:{
         ;
         break;
+        }
       default:
         jj_la1[1] = jj_gen;
         break label_3;
       }
       oprt = jj_consume_token(AND);
-                   /* keep only last used operator */
+/* keep only last used operator */
       if (queries == null) {
         queries = new ArrayList<SrndQuery>();
         queries.add(q);
       }
       q = NotQuery();
-      queries.add(q);
+queries.add(q);
     }
-   {if (true) return (queries == null) ? q : getAndQuery(queries, true /* infix */, oprt);}
+{if ("" != null) return (queries == null) ? q : getAndQuery(queries, true /* infix */, oprt);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery NotQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery NotQuery() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries = null;
   Token oprt = null;
     q = NQuery();
     label_4:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case NOT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case NOT:{
         ;
         break;
+        }
       default:
         jj_la1[2] = jj_gen;
         break label_4;
       }
       oprt = jj_consume_token(NOT);
-                    /* keep only last used operator */
+/* keep only last used operator */
       if (queries == null) {
         queries = new ArrayList<SrndQuery>();
         queries.add(q);
       }
       q = NQuery();
-      queries.add(q);
+queries.add(q);
     }
-   {if (true) return (queries == null) ? q : getNotQuery(queries, oprt);}
+{if ("" != null) return (queries == null) ? q : getNotQuery(queries, oprt);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery NQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery NQuery() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries;
   Token dt;
     q = WQuery();
     label_5:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case N:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case N:{
         ;
         break;
+        }
       default:
         jj_la1[3] = jj_gen;
         break label_5;
       }
       dt = jj_consume_token(N);
-      queries = new ArrayList<SrndQuery>();
+queries = new ArrayList<SrndQuery>();
       queries.add(q); /* left associative */
 
       q = WQuery();
-      queries.add(q);
+queries.add(q);
       q = getDistanceQuery(queries, true /* infix */, dt, false /* not ordered */);
     }
-   {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery WQuery() throws ParseException {
-  SrndQuery q;
+  final public SrndQuery WQuery() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries;
   Token wt;
     q = PrimaryQuery();
     label_6:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case W:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case W:{
         ;
         break;
+        }
       default:
         jj_la1[4] = jj_gen;
         break label_6;
       }
       wt = jj_consume_token(W);
-      queries = new ArrayList<SrndQuery>();
+queries = new ArrayList<SrndQuery>();
       queries.add(q); /* left associative */
 
       q = PrimaryQuery();
-      queries.add(q);
+queries.add(q);
       q = getDistanceQuery(queries, true /* infix */, wt, true /* ordered */);
     }
-   {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery PrimaryQuery() throws ParseException {
-                             /* bracketed weighted query or weighted term */
+  final public SrndQuery PrimaryQuery() throws ParseException {/* bracketed weighted query or weighted term */
   SrndQuery q;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case LPAREN:
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+    case LPAREN:{
       jj_consume_token(LPAREN);
       q = FieldsQuery();
       jj_consume_token(RPAREN);
       break;
+      }
     case OR:
     case AND:
     case W:
-    case N:
+    case N:{
       q = PrefixOperatorQuery();
       break;
+      }
     case TRUNCQUOTED:
     case QUOTED:
     case SUFFIXTERM:
     case TRUNCTERM:
-    case TERM:
+    case TERM:{
       q = SimpleTerm();
       break;
+      }
     default:
       jj_la1[5] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
     OptionalWeights(q);
-   {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery PrefixOperatorQuery() throws ParseException {
-  Token oprt;
+  final public SrndQuery PrefixOperatorQuery() throws ParseException {Token oprt;
   List<SrndQuery> queries;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case OR:
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+    case OR:{
       oprt = jj_consume_token(OR);
       /* prefix OR */
           queries = FieldsQueryList();
-     {if (true) return getOrQuery(queries, false /* not infix */, oprt);}
+{if ("" != null) return getOrQuery(queries, false /* not infix */, oprt);}
       break;
-    case AND:
+      }
+    case AND:{
       oprt = jj_consume_token(AND);
       /* prefix AND */
           queries = FieldsQueryList();
-     {if (true) return getAndQuery(queries, false /* not infix */, oprt);}
+{if ("" != null) return getAndQuery(queries, false /* not infix */, oprt);}
       break;
-    case N:
+      }
+    case N:{
       oprt = jj_consume_token(N);
       /* prefix N */
           queries = FieldsQueryList();
-     {if (true) return getDistanceQuery(queries, false /* not infix */, oprt, false /* not ordered */);}
+{if ("" != null) return getDistanceQuery(queries, false /* not infix */, oprt, false /* not ordered */);}
       break;
-    case W:
+      }
+    case W:{
       oprt = jj_consume_token(W);
       /* prefix W */
           queries = FieldsQueryList();
-     {if (true) return getDistanceQuery(queries, false  /* not infix */, oprt, true /* ordered */);}
+{if ("" != null) return getDistanceQuery(queries, false  /* not infix */, oprt, true /* ordered */);}
       break;
+      }
     default:
       jj_la1[6] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public List<SrndQuery> FieldsQueryList() throws ParseException {
-  SrndQuery q;
+  final public List<SrndQuery> FieldsQueryList() throws ParseException {SrndQuery q;
   ArrayList<SrndQuery> queries = new ArrayList<SrndQuery>();
     jj_consume_token(LPAREN);
     q = FieldsQuery();
-                     queries.add(q);
+queries.add(q);
     label_7:
     while (true) {
       jj_consume_token(COMMA);
       q = FieldsQuery();
-                              queries.add(q);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case COMMA:
+queries.add(q);
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case COMMA:{
         ;
         break;
+        }
       default:
         jj_la1[7] = jj_gen;
         break label_7;
       }
     }
     jj_consume_token(RPAREN);
-   {if (true) return queries;}
+{if ("" != null) return queries;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public SrndQuery SimpleTerm() throws ParseException {
-  Token term;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-    case TERM:
+  final public SrndQuery SimpleTerm() throws ParseException {Token term;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+    case TERM:{
       term = jj_consume_token(TERM);
-     {if (true) return getTermQuery(term.image, false /* not quoted */);}
+{if ("" != null) return getTermQuery(term.image, false /* not quoted */);}
       break;
-    case QUOTED:
+      }
+    case QUOTED:{
       term = jj_consume_token(QUOTED);
-     {if (true) return getTermQuery(term.image.substring(1, term.image.length()-1), true /* quoted */);}
+{if ("" != null) return getTermQuery(term.image.substring(1, term.image.length()-1), true /* quoted */);}
       break;
-    case SUFFIXTERM:
+      }
+    case SUFFIXTERM:{
       term = jj_consume_token(SUFFIXTERM);
-                        /* ending in * */
+/* ending in * */
       if (! allowedSuffix(term.image)) {
         {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
-      {if (true) return getPrefixQuery(term.image.substring(0, term.image.length()-1), false /* not quoted */);}
+      {if ("" != null) return getPrefixQuery(term.image.substring(0, term.image.length()-1), false /* not quoted */);}
       break;
-    case TRUNCTERM:
+      }
+    case TRUNCTERM:{
       term = jj_consume_token(TRUNCTERM);
-                       /* with at least one * or ? */
+/* with at least one * or ? */
       if (! allowedTruncation(term.image)) {
         {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
-      {if (true) return getTruncQuery(term.image);}
+      {if ("" != null) return getTruncQuery(term.image);}
       break;
-    case TRUNCQUOTED:
+      }
+    case TRUNCQUOTED:{
       term = jj_consume_token(TRUNCQUOTED);
-                         /* eg. "9b-b,m"* */
+/* eg. "9b-b,m"* */
       if ((term.image.length() - 3) < MINIMUM_PREFIX_LENGTH) {
         {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
-      {if (true) return getPrefixQuery(term.image.substring(1, term.image.length()-2), true /* quoted */);}
+      {if ("" != null) return getPrefixQuery(term.image.substring(1, term.image.length()-2), true /* quoted */);}
       break;
+      }
     default:
       jj_la1[8] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public void OptionalWeights(SrndQuery q) throws ParseException {
-  Token weight=null;
+  final public void OptionalWeights(SrndQuery q) throws ParseException {Token weight=null;
     label_8:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         ;
         break;
+        }
       default:
         jj_la1[9] = jj_gen;
         break label_8;
       }
       jj_consume_token(CARAT);
       weight = jj_consume_token(NUMBER);
-      float f;
+float f;
       try {
         f = Float.parseFloat(weight.image);
       } catch (Exception floatExc) {
@@ -488,16 +497,18 @@
       q.setWeight(f * q.getWeight()); /* left associative, fwiw */
 
     }
-  }
+}
 
-  private boolean jj_2_1(int xla) {
+  private boolean jj_2_1(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_1(); }
+    try { return (!jj_3_1()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(0, xla); }
   }
 
-  private boolean jj_3_1() {
+  private boolean jj_3_1()
+ {
     if (jj_scan_token(TERM)) return true;
     if (jj_scan_token(COLON)) return true;
     return false;
@@ -516,127 +527,128 @@
   final private int[] jj_la1 = new int[10];
   static private int[] jj_la1_0;
   static {
-      jj_la1_init_0();
-   }
-   private static void jj_la1_init_0() {
-      jj_la1_0 = new int[] {0x100,0x200,0x400,0x1000,0x800,0x7c3b00,0x1b00,0x8000,0x7c0000,0x20000,};
-   }
+       jj_la1_init_0();
+    }
+    private static void jj_la1_init_0() {
+       jj_la1_0 = new int[] {0x100,0x200,0x400,0x1000,0x800,0x7c3b00,0x1b00,0x8000,0x7c0000,0x20000,};
+    }
   final private JJCalls[] jj_2_rtns = new JJCalls[1];
   private boolean jj_rescan = false;
   private int jj_gc = 0;
 
   /** Constructor with user supplied CharStream. */
   public QueryParser(CharStream stream) {
-    token_source = new QueryParserTokenManager(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 10; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = new QueryParserTokenManager(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 10; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(CharStream stream) {
-    token_source.ReInit(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 10; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source.ReInit(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 10; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Constructor with generated Token Manager. */
   public QueryParser(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 10; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 10; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 10; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 10; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      if (++jj_gc > 100) {
-        jj_gc = 0;
-        for (int i = 0; i < jj_2_rtns.length; i++) {
-          JJCalls c = jj_2_rtns[i];
-          while (c != null) {
-            if (c.gen < jj_gen) c.first = null;
-            c = c.next;
-          }
-        }
-      }
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
+     Token oldToken;
+     if ((oldToken = token).next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     if (token.kind == kind) {
+       jj_gen++;
+       if (++jj_gc > 100) {
+         jj_gc = 0;
+         for (int i = 0; i < jj_2_rtns.length; i++) {
+           JJCalls c = jj_2_rtns[i];
+           while (c != null) {
+             if (c.gen < jj_gen) c.first = null;
+             c = c.next;
+           }
+         }
+       }
+       return token;
+     }
+     token = oldToken;
+     jj_kind = kind;
+     throw generateParseException();
   }
 
+  @SuppressWarnings("serial")
   static private final class LookaheadSuccess extends java.lang.Error { }
   final private LookaheadSuccess jj_ls = new LookaheadSuccess();
   private boolean jj_scan_token(int kind) {
-    if (jj_scanpos == jj_lastpos) {
-      jj_la--;
-      if (jj_scanpos.next == null) {
-        jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
-      } else {
-        jj_lastpos = jj_scanpos = jj_scanpos.next;
-      }
-    } else {
-      jj_scanpos = jj_scanpos.next;
-    }
-    if (jj_rescan) {
-      int i = 0; Token tok = token;
-      while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
-      if (tok != null) jj_add_error_token(kind, i);
-    }
-    if (jj_scanpos.kind != kind) return true;
-    if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
-    return false;
+     if (jj_scanpos == jj_lastpos) {
+       jj_la--;
+       if (jj_scanpos.next == null) {
+         jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
+       } else {
+         jj_lastpos = jj_scanpos = jj_scanpos.next;
+       }
+     } else {
+       jj_scanpos = jj_scanpos.next;
+     }
+     if (jj_rescan) {
+       int i = 0; Token tok = token;
+       while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
+       if (tok != null) jj_add_error_token(kind, i);
+     }
+     if (jj_scanpos.kind != kind) return true;
+     if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
+     return false;
   }
 
 
 /** Get the next Token. */
   final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
+     if (token.next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     jj_gen++;
+     return token;
   }
 
 /** Get the specific Token. */
   final public Token getToken(int index) {
-    Token t = token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
+     Token t = token;
+     for (int i = 0; i < index; i++) {
+       if (t.next != null) t = t.next;
+       else t = t.next = token_source.getNextToken();
+     }
+     return t;
   }
 
-  private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
+  private int jj_ntk_f() {
+     if ((jj_nt=token.next) == null)
+       return (jj_ntk = (token.next=token_source.getNextToken()).kind);
+     else
+       return (jj_ntk = jj_nt.kind);
   }
 
   private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
@@ -646,62 +658,83 @@
   private int jj_endpos;
 
   private void jj_add_error_token(int kind, int pos) {
-    if (pos >= 100) return;
-    if (pos == jj_endpos + 1) {
-      jj_lasttokens[jj_endpos++] = kind;
-    } else if (jj_endpos != 0) {
-      jj_expentry = new int[jj_endpos];
-      for (int i = 0; i < jj_endpos; i++) {
-        jj_expentry[i] = jj_lasttokens[i];
-      }
-      jj_entries_loop: for (java.util.Iterator<?> it = jj_expentries.iterator(); it.hasNext();) {
-        int[] oldentry = (int[])(it.next());
-        if (oldentry.length == jj_expentry.length) {
-          for (int i = 0; i < jj_expentry.length; i++) {
-            if (oldentry[i] != jj_expentry[i]) {
-              continue jj_entries_loop;
-            }
-          }
-          jj_expentries.add(jj_expentry);
-          break jj_entries_loop;
-        }
-      }
-      if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
-    }
+     if (pos >= 100) {
+        return;
+     }
+
+     if (pos == jj_endpos + 1) {
+       jj_lasttokens[jj_endpos++] = kind;
+     } else if (jj_endpos != 0) {
+       jj_expentry = new int[jj_endpos];
+
+       for (int i = 0; i < jj_endpos; i++) {
+         jj_expentry[i] = jj_lasttokens[i];
+       }
+
+       for (int[] oldentry : jj_expentries) {
+         if (oldentry.length == jj_expentry.length) {
+           boolean isMatched = true;
+
+           for (int i = 0; i < jj_expentry.length; i++) {
+             if (oldentry[i] != jj_expentry[i]) {
+               isMatched = false;
+               break;
+             }
+
+           }
+           if (isMatched) {
+             jj_expentries.add(jj_expentry);
+             break;
+           }
+         }
+       }
+
+       if (pos != 0) {
+         jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+       }
+     }
   }
 
   /** Generate ParseException. */
   public ParseException generateParseException() {
-    jj_expentries.clear();
-    boolean[] la1tokens = new boolean[24];
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 10; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 24; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.add(jj_expentry);
-      }
-    }
-    jj_endpos = 0;
-    jj_rescan_token();
-    jj_add_error_token(0, 0);
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.get(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
+     jj_expentries.clear();
+     boolean[] la1tokens = new boolean[24];
+     if (jj_kind >= 0) {
+       la1tokens[jj_kind] = true;
+       jj_kind = -1;
+     }
+     for (int i = 0; i < 10; i++) {
+       if (jj_la1[i] == jj_gen) {
+         for (int j = 0; j < 32; j++) {
+           if ((jj_la1_0[i] & (1<<j)) != 0) {
+             la1tokens[j] = true;
+           }
+         }
+       }
+     }
+     for (int i = 0; i < 24; i++) {
+       if (la1tokens[i]) {
+         jj_expentry = new int[1];
+         jj_expentry[0] = i;
+         jj_expentries.add(jj_expentry);
+       }
+     }
+     jj_endpos = 0;
+     jj_rescan_token();
+     jj_add_error_token(0, 0);
+     int[][] exptokseq = new int[jj_expentries.size()][];
+     for (int i = 0; i < jj_expentries.size(); i++) {
+       exptokseq[i] = jj_expentries.get(i);
+     }
+     return new ParseException(token, exptokseq, tokenImage);
+  }
+
+  private int trace_indent = 0;
+  private boolean trace_enabled;
+
+/** Trace enabled. */
+  final public boolean trace_enabled() {
+     return trace_enabled;
   }
 
   /** Enable tracing. */
@@ -713,38 +746,43 @@
   }
 
   private void jj_rescan_token() {
-    jj_rescan = true;
-    for (int i = 0; i < 1; i++) {
-    try {
-      JJCalls p = jj_2_rtns[i];
-      do {
-        if (p.gen > jj_gen) {
-          jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
-          switch (i) {
-            case 0: jj_3_1(); break;
-          }
-        }
-        p = p.next;
-      } while (p != null);
-      } catch(LookaheadSuccess ls) { }
-    }
-    jj_rescan = false;
+     jj_rescan = true;
+     for (int i = 0; i < 1; i++) {
+       try {
+         JJCalls p = jj_2_rtns[i];
+
+         do {
+           if (p.gen > jj_gen) {
+             jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+             switch (i) {
+               case 0: jj_3_1(); break;
+             }
+           }
+           p = p.next;
+         } while (p != null);
+
+         } catch(LookaheadSuccess ls) { }
+     }
+     jj_rescan = false;
   }
 
   private void jj_save(int index, int xla) {
-    JJCalls p = jj_2_rtns[index];
-    while (p.gen > jj_gen) {
-      if (p.next == null) { p = p.next = new JJCalls(); break; }
-      p = p.next;
-    }
-    p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+     JJCalls p = jj_2_rtns[index];
+     while (p.gen > jj_gen) {
+       if (p.next == null) { p = p.next = new JJCalls(); break; }
+       p = p.next;
+     }
+
+     p.gen = jj_gen + xla - jj_la; 
+     p.first = token;
+     p.arg = xla;
   }
 
   static final class JJCalls {
-    int gen;
-    Token first;
-    int arg;
-    JJCalls next;
+     int gen;
+     Token first;
+     int arg;
+     JJCalls next;
   }
 
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
index f51a03e..0f0d30c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
@@ -1,20 +1,35 @@
+/* QueryParserTokenManager.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
 package org.apache.lucene.queryparser.surround.parser;
-/** Token Manager. */
-public class QueryParserTokenManager implements QueryParserConstants
-{
 
-  
-private final int jjStopStringLiteralDfa_1(int pos, long active0)
-{
+
+
+
+
+
+
+
+
+
+
+
+
+
+/** Token Manager. */
+public class QueryParserTokenManager implements QueryParserConstants {
+
+  /** Debug output. */
+  // (debugStream omitted).
+  /** Set debug output. */
+  // (setDebugStream omitted).
+private final int jjStopStringLiteralDfa_1(int pos, long active0){
    switch (pos)
    {
       default :
          return -1;
    }
 }
-private final int jjStartNfa_1(int pos, long active0)
-{
+private final int jjStartNfa_1(int pos, long active0){
    return jjMoveNfa_1(jjStopStringLiteralDfa_1(pos, active0), pos + 1);
 }
 private int jjStopAtPos(int pos, int kind)
@@ -23,8 +38,7 @@
    jjmatchedPos = pos;
    return pos + 1;
 }
-private int jjMoveStringLiteralDfa0_1()
-{
+private int jjMoveStringLiteralDfa0_1(){
    switch(curChar)
    {
       case 40:
@@ -70,7 +84,7 @@
                   {
                      if (kind > 22)
                         kind = 22;
-                     jjCheckNAddStates(0, 4);
+                     { jjCheckNAddStates(0, 4); }
                   }
                   else if ((0x100002600L & l) != 0L)
                   {
@@ -78,35 +92,35 @@
                         kind = 7;
                   }
                   else if (curChar == 34)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   if ((0x3fc000000000000L & l) != 0L)
-                     jjCheckNAddStates(8, 11);
+                     { jjCheckNAddStates(8, 11); }
                   else if (curChar == 49)
-                     jjCheckNAddTwoStates(20, 21);
+                     { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 19:
                   if ((0x3fc000000000000L & l) != 0L)
-                     jjCheckNAddStates(8, 11);
+                     { jjCheckNAddStates(8, 11); }
                   break;
                case 20:
                   if ((0x3ff000000000000L & l) != 0L)
-                     jjCheckNAdd(17);
+                     { jjCheckNAdd(17); }
                   break;
                case 21:
                   if ((0x3ff000000000000L & l) != 0L)
-                     jjCheckNAdd(18);
+                     { jjCheckNAdd(18); }
                   break;
                case 22:
                   if (curChar == 49)
-                     jjCheckNAddTwoStates(20, 21);
+                     { jjCheckNAddTwoStates(20, 21); }
                   break;
                case 23:
                   if (curChar == 34)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   break;
                case 24:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddTwoStates(24, 25);
+                     { jjCheckNAddTwoStates(24, 25); }
                   break;
                case 25:
                   if (curChar == 34)
@@ -118,11 +132,11 @@
                   break;
                case 27:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(12, 14);
+                     { jjCheckNAddStates(12, 14); }
                   break;
                case 29:
                   if (curChar == 34)
-                     jjCheckNAddStates(12, 14);
+                     { jjCheckNAddStates(12, 14); }
                   break;
                case 30:
                   if (curChar == 34 && kind > 19)
@@ -133,11 +147,11 @@
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAddStates(0, 4);
+                  { jjCheckNAddStates(0, 4); }
                   break;
                case 32:
                   if ((0x7bffe8faffffd9ffL & l) != 0L)
-                     jjCheckNAddTwoStates(32, 33);
+                     { jjCheckNAddTwoStates(32, 33); }
                   break;
                case 33:
                   if (curChar == 42 && kind > 20)
@@ -145,28 +159,28 @@
                   break;
                case 34:
                   if ((0x7bffe8faffffd9ffL & l) != 0L)
-                     jjCheckNAddTwoStates(34, 35);
+                     { jjCheckNAddTwoStates(34, 35); }
                   break;
                case 35:
                   if ((0x8000040000000000L & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAddTwoStates(35, 36);
+                  { jjCheckNAddTwoStates(35, 36); }
                   break;
                case 36:
                   if ((0xfbffecfaffffd9ffL & l) == 0L)
                      break;
                   if (kind > 21)
                      kind = 21;
-                  jjCheckNAdd(36);
+                  { jjCheckNAdd(36); }
                   break;
                case 37:
                   if ((0x7bffe8faffffd9ffL & l) == 0L)
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAdd(37);
+                  { jjCheckNAdd(37); }
                   break;
                default : break;
             }
@@ -184,7 +198,7 @@
                   {
                      if (kind > 22)
                         kind = 22;
-                     jjCheckNAddStates(0, 4);
+                     { jjCheckNAddStates(0, 4); }
                   }
                   if ((0x400000004000L & l) != 0L)
                   {
@@ -282,11 +296,11 @@
                      kind = 12;
                   break;
                case 24:
-                  jjAddStates(15, 16);
+                  { jjAddStates(15, 16); }
                   break;
                case 27:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(12, 14);
+                     { jjCheckNAddStates(12, 14); }
                   break;
                case 28:
                   if (curChar == 92)
@@ -294,22 +308,22 @@
                   break;
                case 29:
                   if (curChar == 92)
-                     jjCheckNAddStates(12, 14);
+                     { jjCheckNAddStates(12, 14); }
                   break;
                case 31:
                   if ((0xffffffffbfffffffL & l) == 0L)
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAddStates(0, 4);
+                  { jjCheckNAddStates(0, 4); }
                   break;
                case 32:
                   if ((0xffffffffbfffffffL & l) != 0L)
-                     jjCheckNAddTwoStates(32, 33);
+                     { jjCheckNAddTwoStates(32, 33); }
                   break;
                case 34:
                   if ((0xffffffffbfffffffL & l) != 0L)
-                     jjCheckNAddTwoStates(34, 35);
+                     { jjCheckNAddTwoStates(34, 35); }
                   break;
                case 36:
                   if ((0xffffffffbfffffffL & l) == 0L)
@@ -323,7 +337,7 @@
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAdd(37);
+                  { jjCheckNAdd(37); }
                   break;
                default : break;
             }
@@ -331,7 +345,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -345,23 +359,23 @@
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAddStates(0, 4);
+                  { jjCheckNAddStates(0, 4); }
                   break;
                case 24:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
-                     jjAddStates(15, 16);
+                     { jjAddStates(15, 16); }
                   break;
                case 27:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
-                     jjAddStates(12, 14);
+                     { jjAddStates(12, 14); }
                   break;
                case 32:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddTwoStates(32, 33);
+                     { jjCheckNAddTwoStates(32, 33); }
                   break;
                case 34:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddTwoStates(34, 35);
+                     { jjCheckNAddTwoStates(34, 35); }
                   break;
                case 36:
                   if (!jjCanMove_0(hiByte, i1, i2, l1, l2))
@@ -375,9 +389,9 @@
                      break;
                   if (kind > 22)
                      kind = 22;
-                  jjCheckNAdd(37);
+                  { jjCheckNAdd(37); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -421,18 +435,18 @@
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjAddStates(17, 18);
+                  { jjAddStates(17, 18); }
                   break;
                case 1:
                   if (curChar == 46)
-                     jjCheckNAdd(2);
+                     { jjCheckNAdd(2); }
                   break;
                case 2:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 23)
                      kind = 23;
-                  jjCheckNAdd(2);
+                  { jjCheckNAdd(2); }
                   break;
                default : break;
             }
@@ -451,7 +465,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -460,7 +474,7 @@
          {
             switch(jjstateSet[--i])
             {
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -477,6 +491,36 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
+
+/** Token literal values. */
+public static final String[] jjstrLiteralImages = {
+"", null, null, null, null, null, null, null, null, null, null, null, null, 
+"\50", "\51", "\54", "\72", "\136", null, null, null, null, null, null, };
+protected Token jjFillToken()
+{
+   final Token t;
+   final String curTokenImage;
+   final int beginLine;
+   final int endLine;
+   final int beginColumn;
+   final int endColumn;
+   String im = jjstrLiteralImages[jjmatchedKind];
+   curTokenImage = (im == null) ? input_stream.GetImage() : im;
+   beginLine = input_stream.getBeginLine();
+   beginColumn = input_stream.getBeginColumn();
+   endLine = input_stream.getEndLine();
+   endColumn = input_stream.getEndColumn();
+   t = Token.newToken(jjmatchedKind);
+   t.kind = jjmatchedKind;
+   t.image = curTokenImage;
+
+   t.beginLine = beginLine;
+   t.endLine = endLine;
+   t.beginColumn = beginColumn;
+   t.endColumn = endColumn;
+
+   return t;
+}
 static final int[] jjnextStates = {
    32, 33, 34, 35, 37, 24, 27, 28, 20, 17, 21, 18, 27, 28, 30, 24, 
    25, 0, 1, 
@@ -494,98 +538,6 @@
    }
 }
 
-/** Token literal values. */
-public static final String[] jjstrLiteralImages = {
-"", null, null, null, null, null, null, null, null, null, null, null, null, 
-"\50", "\51", "\54", "\72", "\136", null, null, null, null, null, null, };
-
-/** Lexer state names. */
-public static final String[] lexStateNames = {
-   "Boost",
-   "DEFAULT",
-};
-
-/** Lex State array. */
-public static final int[] jjnewLexState = {
-   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, 1, 
-};
-static final long[] jjtoToken = {
-   0xffff01L, 
-};
-static final long[] jjtoSkip = {
-   0x80L, 
-};
-protected CharStream input_stream;
-private final int[] jjrounds = new int[38];
-private final int[] jjstateSet = new int[76];
-protected char curChar;
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream){
-   input_stream = stream;
-}
-
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream, int lexState){
-   this(stream);
-   SwitchTo(lexState);
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream)
-{
-   jjmatchedPos = jjnewStateCnt = 0;
-   curLexState = defaultLexState;
-   input_stream = stream;
-   ReInitRounds();
-}
-private void ReInitRounds()
-{
-   int i;
-   jjround = 0x80000001;
-   for (i = 38; i-- > 0;)
-      jjrounds[i] = 0x80000000;
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream, int lexState)
-{
-   ReInit(stream);
-   SwitchTo(lexState);
-}
-
-/** Switch to specified lex state. */
-public void SwitchTo(int lexState)
-{
-   if (lexState >= 2 || lexState < 0)
-      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
-   else
-      curLexState = lexState;
-}
-
-protected Token jjFillToken()
-{
-   final Token t;
-   final String curTokenImage;
-   final int beginLine;
-   final int endLine;
-   final int beginColumn;
-   final int endColumn;
-   String im = jjstrLiteralImages[jjmatchedKind];
-   curTokenImage = (im == null) ? input_stream.GetImage() : im;
-   beginLine = input_stream.getBeginLine();
-   beginColumn = input_stream.getBeginColumn();
-   endLine = input_stream.getEndLine();
-   endColumn = input_stream.getEndColumn();
-   t = Token.newToken(jjmatchedKind, curTokenImage);
-
-   t.beginLine = beginLine;
-   t.endLine = endLine;
-   t.beginColumn = beginColumn;
-   t.endColumn = endColumn;
-
-   return t;
-}
-
 int curLexState = 1;
 int defaultLexState = 1;
 int jjnewStateCnt;
@@ -606,9 +558,10 @@
    {
       curChar = input_stream.BeginToken();
    }
-   catch(java.io.IOException e)
+   catch(Exception e)
    {
       jjmatchedKind = 0;
+      jjmatchedPos = -1;
       matchedToken = jjFillToken();
       return matchedToken;
    }
@@ -667,6 +620,31 @@
   }
 }
 
+void SkipLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void MoreLexicalActions()
+{
+   jjimageLen += (lengthOfMatch = jjmatchedPos + 1);
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void TokenLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
 private void jjCheckNAdd(int state)
 {
    if (jjrounds[state] != jjround)
@@ -694,4 +672,88 @@
    } while (start++ != end);
 }
 
+    /** Constructor. */
+    public QueryParserTokenManager(CharStream stream){
+
+
+    input_stream = stream;
+  }
+
+  /** Constructor. */
+  public QueryParserTokenManager (CharStream stream, int lexState){
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Reinitialise parser. */
+  
+  public void ReInit(CharStream stream)
+  {
+
+
+    jjmatchedPos =
+    jjnewStateCnt =
+    0;
+    curLexState = defaultLexState;
+    input_stream = stream;
+    ReInitRounds();
+  }
+
+  private void ReInitRounds()
+  {
+    int i;
+    jjround = 0x80000001;
+    for (i = 38; i-- > 0;)
+      jjrounds[i] = 0x80000000;
+  }
+
+  /** Reinitialise parser. */
+  public void ReInit(CharStream stream, int lexState)
+  
+  {
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Switch to specified lex state. */
+  public void SwitchTo(int lexState)
+  {
+    if (lexState >= 2 || lexState < 0)
+      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+    else
+      curLexState = lexState;
+  }
+
+
+/** Lexer state names. */
+public static final String[] lexStateNames = {
+   "Boost",
+   "DEFAULT",
+};
+
+/** Lex State array. */
+public static final int[] jjnewLexState = {
+   -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, 1, 
+};
+static final long[] jjtoToken = {
+   0xffff01L, 
+};
+static final long[] jjtoSkip = {
+   0x80L, 
+};
+static final long[] jjtoSpecial = {
+   0x0L, 
+};
+static final long[] jjtoMore = {
+   0x0L, 
+};
+    protected CharStream  input_stream;
+
+    private final int[] jjrounds = new int[38];
+    private final int[] jjstateSet = new int[2 * 38];
+    private final StringBuilder jjimage = new StringBuilder();
+    private StringBuilder image = jjimage;
+    private int jjimageLen;
+    private int lengthOfMatch;
+    protected int curChar;
 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/Token.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/Token.java
index d6736f8..f924938 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/Token.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/Token.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. Token.java Version 5.0 */
-/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
+/* Generated By:JavaCC: Do not edit this line. Token.java Version 7.0 */
+/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.lucene.queryparser.surround.parser;
 
 /**
@@ -97,6 +97,7 @@
   /**
    * Returns the image.
    */
+  @Override
   public String toString()
   {
     return image;
@@ -128,4 +129,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=f2df701e24da1cf2d025118ce6efdd2f (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/TokenMgrError.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/TokenMgrError.java
index 8b8727d..80f8188 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/TokenMgrError.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/TokenMgrError.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 7.0 */
 /* JavaCCOptions: */
 package org.apache.lucene.queryparser.surround.parser;
 
@@ -20,22 +20,22 @@
   /**
    * Lexical error occurred.
    */
-  static final int LEXICAL_ERROR = 0;
+  public static final int LEXICAL_ERROR = 0;
 
   /**
    * An attempt was made to create a second instance of a static token manager.
    */
-  static final int STATIC_LEXER_ERROR = 1;
+  public static final int STATIC_LEXER_ERROR = 1;
 
   /**
    * Tried to change to an invalid lexical state.
    */
-  static final int INVALID_LEXICAL_STATE = 2;
+  public static final int INVALID_LEXICAL_STATE = 2;
 
   /**
    * Detected (and bailed out of) an infinite loop in the token manager.
    */
-  static final int LOOP_DETECTED = 3;
+  public static final int LOOP_DETECTED = 3;
 
   /**
    * Indicates the reason why the exception is thrown. It will have
@@ -53,8 +53,6 @@
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
       {
-        case 0 :
-          continue;
         case '\b':
           retval.append("\\b");
           continue;
@@ -104,11 +102,12 @@
    *    curchar     : the offending character
    * Note: You can customize the lexical error message by modifying this method.
    */
-  protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
+  protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) {
+    char curChar1 = (char)curChar;
     return("Lexical error at line " +
           errorLine + ", column " +
           errorColumn + ".  Encountered: " +
-          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
+          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + curChar + "), ") +
           "after : \"" + addEscapes(errorAfter) + "\"");
   }
 
@@ -121,6 +120,7 @@
    *
    * from this method for such cases in the release version of your parser.
    */
+  @Override
   public String getMessage() {
     return super.getMessage();
   }
@@ -140,8 +140,8 @@
   }
 
   /** Full Constructor. */
-  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
-    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
+  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) {
+    this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=8c69a370d9a9893140562c8bb911678c (do not edit this line) */
+/* (filtered)*/
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index 717a013..1f9fd91 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -17,6 +17,7 @@
 package org.apache.lucene.queryparser.flexible.standard;
 
 import java.io.IOException;
+import java.io.StringReader;
 import java.text.DateFormat;
 import java.util.Calendar;
 import java.util.Date;
@@ -53,6 +54,9 @@
 import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
 import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
 import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode;
+import org.apache.lucene.queryparser.flexible.standard.parser.FastCharStream;
+import org.apache.lucene.queryparser.flexible.standard.parser.ParseException;
+import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -494,7 +498,12 @@
         "+(apple \"steve jobs\") -(foo bar baz)");
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
         "+(title:dog title:cat) -author:\"bob dole\"");
+  }
 
+  public void testParse() throws ParseException {
+    StandardSyntaxParser p = new StandardSyntaxParser(new FastCharStream(new StringReader("")));
+    p.ReInit(new FastCharStream(new StringReader("title:(dog OR cat)")));
+    System.out.println(p.TopLevelQuery("_fld_"));
   }
 
   public void testPunct() throws Exception {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java
index b65d355..cf20279 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/DeflateWithPresetCompressingCodec.java
@@ -25,7 +25,7 @@
   public DeflateWithPresetCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) {
     super("DeflateWithPresetCompressingStoredFieldsData", 
           withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "",
-          new DeflateWithPresetDictCompressionMode(chunkSize/10, chunkSize/3+1), chunkSize, maxDocsPerChunk, blockSize);
+          new DeflateWithPresetDictCompressionMode(), chunkSize, maxDocsPerChunk, blockSize);
   }
 
   /** No-arg constructor. */
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java
index ea413fc..690d26c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/LZ4WithPresetCompressingCodec.java
@@ -25,7 +25,7 @@
   public LZ4WithPresetCompressingCodec(int chunkSize, int maxDocsPerChunk, boolean withSegmentSuffix, int blockSize) {
     super("LZ4WithPresetCompressingStoredFieldsData", 
           withSegmentSuffix ? "DeflateWithPresetCompressingStoredFields" : "",
-          new LZ4WithPresetDictCompressionMode(chunkSize/10, chunkSize/3+1), chunkSize, maxDocsPerChunk, blockSize);
+          new LZ4WithPresetDictCompressionMode(), chunkSize, maxDocsPerChunk, blockSize);
   }
 
   /** No-arg constructor. */
diff --git a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
index 1775e6d..07804c3 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
@@ -297,7 +297,8 @@
       for (LeafReaderContext leaf : ir2.leaves()) {
         CodecReader sr = (CodecReader) leaf.reader();
         CompressingStoredFieldsReader reader = (CompressingStoredFieldsReader)sr.getFieldsReader();
-        assertEquals(1, reader.getNumChunks());
+        assertTrue(reader.getNumDirtyDocs() > 0);
+        assertTrue(reader.getNumDirtyDocs() < 100); // can't be gte the number of docs per chunk
         assertEquals(1, reader.getNumDirtyChunks());
       }
     }
diff --git a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
index e3477e9..e21ab6e 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/codecs/compressing/TestCompressingTermVectorsFormat.java
@@ -102,7 +102,7 @@
       for (LeafReaderContext leaf : ir2.leaves()) {
         CodecReader sr = (CodecReader) leaf.reader();
         CompressingTermVectorsReader reader = (CompressingTermVectorsReader)sr.getTermVectorsReader();
-        assertEquals(1, reader.getNumChunks());
+        assertTrue(reader.getNumDirtyDocs() > 0);
         assertEquals(1, reader.getNumDirtyChunks());
       }
     }
diff --git a/settings.gradle b/settings.gradle
index aeafd2b..520b7bf 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -62,7 +62,6 @@
 include "solr:contrib:langid"
 include "solr:contrib:jaegertracer-configurator"
 include "solr:contrib:prometheus-exporter"
-include "solr:contrib:velocity"
 include "solr:contrib:ltr"
 include "solr:webapp"
 include "solr:test-framework"
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9c90e32..afdc6cf 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -46,6 +46,8 @@
 
 * SOLR-14615: CPU Utilization Based Circuit Breaker (Atri Sharma)
 
+* SOLR-14799: JWT authentication plugin only requires "sub" claim when principalClaim=sub. (Erik Hatcher)
+
 Other Changes
 ----------------------
 * SOLR-14656: Autoscaling framework removed (Ishan Chattopadhyaya, noble, Ilan Ginzburg)
diff --git a/solr/contrib/velocity/build.gradle b/solr/contrib/velocity/build.gradle
deleted file mode 100644
index b17b38b..0000000
--- a/solr/contrib/velocity/build.gradle
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-apply plugin: 'java-library'
-
-description = 'Solr Velocity Response Writer'
-
-dependencies {
-  implementation project(':solr:core')
-
-  implementation('org.apache.velocity.tools:velocity-tools-view-jsp', {
-    exclude group: "commons-beanutils", module: "commons-beanutils"
-    exclude group: "org.apache.commons", module: "commons-digester3"
-    exclude group: "com.github.cliftonlabs", module: "json-simple"
-  })
-
-  testImplementation project(':solr:test-framework')
-}
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java b/solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java
deleted file mode 100644
index 193c89b..0000000
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/PageTool.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.response;
-
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.search.DocList;
-import org.apache.solr.search.DocSlice;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-
-/**
- * This class is used by the Velocity response writer to provide a consistent paging tool for use by templates.
- *
- * TODO: add more details
- */
-public class PageTool {
-  private long start;
-  private int results_per_page = 10;
-  private long results_found;
-  private int page_count;
-  private int current_page_number;
-
-  public PageTool(SolrQueryRequest request, SolrQueryResponse response) {
-    String rows = request.getParams().get("rows");
-
-    if (rows != null) {
-      results_per_page = Integer.parseInt(rows);
-    }
-    //TODO: Handle group by results
-    Object docs = response.getResponse();
-    if (docs != null) {
-      if (docs instanceof DocSlice) {
-        results_found = ((DocSlice) docs).matches();
-        start = ((DocSlice) docs).offset();
-      } else if(docs instanceof ResultContext) {
-        DocList dl = ((ResultContext) docs).getDocList();
-        results_found = dl.matches();
-        start = dl.offset();
-      } else if(docs instanceof SolrDocumentList) {
-        SolrDocumentList doc_list = (SolrDocumentList) docs;
-        results_found = doc_list.getNumFound();
-        start = doc_list.getStart();
-      } else {
-        throw new SolrException(SolrException.ErrorCode.UNKNOWN, "Unknown response type "+docs+". Expected one of DocSlice, ResultContext or SolrDocumentList");
-      }
-    }
-
-    page_count = (int) Math.ceil(results_found / (double) results_per_page);
-    current_page_number = (int) Math.ceil(start / (double) results_per_page) + (page_count > 0 ? 1 : 0);
-  }
-
-  public long getStart() {
-    return start;
-  }
-
-  public int getResults_per_page() {
-    return results_per_page;
-  }
-
-  public long getResults_found() {
-    return results_found;
-  }
-
-  public int getPage_count() {
-    return page_count;
-  }
-
-  public int getCurrent_page_number() {
-    return current_page_number;
-  }
-
-  @Override
-  public String toString() {
-    return "Found " + results_found +
-           " Page " + current_page_number + " of " + page_count +
-           " Starting at " + start + " per page " + results_per_page;
-  }
-}
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java b/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java
deleted file mode 100644
index bf778e1..0000000
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.response;
-
-import java.io.IOException;
-import java.io.Reader;
-
-import org.apache.solr.core.SolrResourceLoader;
-import org.apache.velocity.exception.ResourceNotFoundException;
-import org.apache.velocity.runtime.resource.Resource;
-import org.apache.velocity.runtime.resource.loader.ResourceLoader;
-import org.apache.velocity.util.ExtProperties;
-
-/**
-  * Velocity resource loader wrapper around Solr resource loader
-  */
-public class SolrVelocityResourceLoader extends ResourceLoader {
-  private SolrResourceLoader loader;
-
-  public SolrVelocityResourceLoader(SolrResourceLoader loader) {
-    super();
-    this.loader = loader;
-  }
-
-  @Override
-  public void init(ExtProperties extendedProperties) {
-  }
-
-  @Override
-  public Reader getResourceReader(String source, String encoding) throws ResourceNotFoundException {
-    try {
-      return buildReader(loader.openResource("velocity/" + source), encoding);
-    } catch (IOException ioe) {
-      throw new ResourceNotFoundException(ioe);
-    }
-  }
-
-  @Override
-  public boolean isSourceModified(Resource resource) {
-    return false;
-  }
-
-  @Override
-  public long getLastModified(Resource resource) {
-    return 0;
-  }
-}
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java b/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
deleted file mode 100644
index 58d8b2b..0000000
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.response;
-
-import java.io.File;
-import java.io.FilePermission;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.lang.invoke.MethodHandles;
-import java.nio.charset.StandardCharsets;
-import java.security.AccessControlContext;
-import java.security.AccessController;
-import java.security.Permissions;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.security.ProtectionDomain;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-import java.util.PropertyPermission;
-import java.util.ResourceBundle;
-
-import org.apache.solr.client.solrj.SolrResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.client.solrj.response.SolrResponseBase;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.apache.velocity.Template;
-import org.apache.velocity.VelocityContext;
-import org.apache.velocity.app.VelocityEngine;
-import org.apache.velocity.runtime.RuntimeConstants;
-import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
-import org.apache.velocity.tools.generic.CollectionTool;
-import org.apache.velocity.tools.generic.ComparisonDateTool;
-import org.apache.velocity.tools.generic.DisplayTool;
-import org.apache.velocity.tools.generic.EscapeTool;
-import org.apache.velocity.tools.generic.LocaleConfig;
-import org.apache.velocity.tools.generic.MathTool;
-import org.apache.velocity.tools.generic.NumberTool;
-import org.apache.velocity.tools.generic.ResourceTool;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.solr.common.params.CommonParams.SORT;
-
-public class VelocityResponseWriter implements QueryResponseWriter, SolrCoreAware {
-  // init param names, these are _only_ loaded at init time (no per-request control of these)
-  //   - multiple different named writers could be created with different init params
-  public static final String TEMPLATE_BASE_DIR = "template.base.dir";
-  public static final String PROPERTIES_FILE = "init.properties.file";
-
-  // System property names, these are _only_ loaded at node startup (no per-request control of these)
-  public static final String SOLR_RESOURCE_LOADER_ENABLED = "velocity.resourceloader.solr.enabled";
-
-  // request param names
-  public static final String TEMPLATE = "v.template";
-  public static final String LAYOUT = "v.layout";
-  public static final String LAYOUT_ENABLED = "v.layout.enabled";
-  public static final String CONTENT_TYPE = "v.contentType";
-  public static final String JSON = "v.json";
-  public static final String LOCALE = "v.locale";
-
-  public static final String TEMPLATE_EXTENSION = ".vm";
-  public static final String DEFAULT_CONTENT_TYPE = "text/html;charset=UTF-8";
-  public static final String JSON_CONTENT_TYPE = "application/json;charset=UTF-8";
-
-  private File fileResourceLoaderBaseDir;
-  private String initPropertiesFileName;  // used just to hold from init() to inform()
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private Properties velocityInitProps = new Properties();
-  private Map<String,String> customTools = new HashMap<String,String>();
-
-  @Override
-  public void init(@SuppressWarnings({"rawtypes"})NamedList args) {
-    log.warn("VelocityResponseWriter is deprecated. This may be removed in future Solr releases. Please SOLR-14065.");
-    fileResourceLoaderBaseDir = null;
-    String templateBaseDir = (String) args.get(TEMPLATE_BASE_DIR);
-
-    if (templateBaseDir != null && !templateBaseDir.isEmpty()) {
-      fileResourceLoaderBaseDir = new File(templateBaseDir).getAbsoluteFile();
-      if (!fileResourceLoaderBaseDir.exists()) { // "*not* exists" condition!
-        log.warn("{} specified does not exist: {}", TEMPLATE_BASE_DIR, fileResourceLoaderBaseDir);
-        fileResourceLoaderBaseDir = null;
-      } else {
-        if (!fileResourceLoaderBaseDir.isDirectory()) { // "*not* a directory" condition
-          log.warn("{} specified is not a directory: {}", TEMPLATE_BASE_DIR, fileResourceLoaderBaseDir);
-          fileResourceLoaderBaseDir = null;
-        }
-      }
-    }
-
-    initPropertiesFileName = (String) args.get(PROPERTIES_FILE);
-
-    @SuppressWarnings({"rawtypes"})
-    NamedList tools = (NamedList)args.get("tools");
-    if (tools != null) {
-      for(Object t : tools) {
-        @SuppressWarnings({"rawtypes"})
-        Map.Entry tool = (Map.Entry)t;
-        customTools.put(tool.getKey().toString(), tool.getValue().toString());
-      }
-    }
-  }
-
-  @Override
-  public void inform(SolrCore core) {
-    // need to leverage SolrResourceLoader, so load init.properties.file here instead of init()
-    if (initPropertiesFileName != null) {
-      try {
-        velocityInitProps.load(new InputStreamReader(core.getResourceLoader().openResource(initPropertiesFileName), StandardCharsets.UTF_8));
-      } catch (IOException e) {
-        log.warn("Error loading {} specified property file: {}", PROPERTIES_FILE, initPropertiesFileName, e);
-      }
-    }
-    }
-
-  @Override
-  public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
-    String contentType = request.getParams().get(CONTENT_TYPE);
-
-    // Use the v.contentType specified, or either of the default content types depending on the presence of v.json
-    return (contentType != null) ? contentType : ((request.getParams().get(JSON) == null) ? DEFAULT_CONTENT_TYPE : JSON_CONTENT_TYPE);
-  }
-
-  @Override
-  public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
-    // run doWrite() with the velocity sandbox
-    try {
-      AccessController.doPrivileged(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws IOException {
-          doWrite(writer, request, response);
-          return null;
-        }
-      }, VELOCITY_SANDBOX);
-    } catch (PrivilegedActionException e) {
-      throw (IOException) e.getException();
-    }
-  }
-
-  // sandbox for velocity code
-  // TODO: we could read in a policy file instead, in case someone needs to tweak it?
-  private static final AccessControlContext VELOCITY_SANDBOX;
-  static {
-    Permissions permissions = new Permissions();
-    // TODO: restrict the scope of this! we probably only need access to classpath
-    permissions.add(new FilePermission("<<ALL FILES>>", "read,readlink"));
-    // properties needed by SolrResourceLoader (called from velocity code)
-    permissions.add(new PropertyPermission("jetty.testMode", "read"));
-    permissions.add(new PropertyPermission("solr.allow.unsafe.resourceloading", "read"));
-    // properties needed by log4j (called from velocity code)
-    permissions.add(new PropertyPermission("java.version", "read"));
-    // needed by velocity duck-typing
-    permissions.add(new RuntimePermission("accessDeclaredMembers"));
-    permissions.setReadOnly();
-    VELOCITY_SANDBOX = new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, permissions) });
-  }
-
-  private void doWrite(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
-    VelocityEngine engine = createEngine(request);  // TODO: have HTTP headers available for configuring engine
-
-    Template template = getTemplate(engine, request);
-
-    VelocityContext context = createContext(request, response);
-    context.put("engine", engine);  // for $engine.resourceExists(...)
-
-    String layoutTemplate = request.getParams().get(LAYOUT);
-    boolean layoutEnabled = request.getParams().getBool(LAYOUT_ENABLED, true) && layoutTemplate != null;
-
-    String jsonWrapper = request.getParams().get(JSON);
-    boolean wrapResponse = layoutEnabled || jsonWrapper != null;
-
-    // create output
-    if (!wrapResponse) {
-      // straight-forward template/context merge to output
-      template.merge(context, writer);
-    }
-    else {
-      // merge to a string buffer, then wrap with layout and finally as JSON
-      StringWriter stringWriter = new StringWriter();
-      template.merge(context, stringWriter);
-
-      if (layoutEnabled) {
-        context.put("content", stringWriter.toString());
-        stringWriter = new StringWriter();
-        try {
-          engine.getTemplate(layoutTemplate + TEMPLATE_EXTENSION).merge(context, stringWriter);
-        } catch (Exception e) {
-          throw new IOException(e.getMessage());
-        }
-      }
-
-      if (jsonWrapper != null) {
-        for (int i=0; i<jsonWrapper.length(); i++) {
-          if (!Character.isJavaIdentifierPart(jsonWrapper.charAt(i))) {
-            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid function name for " + JSON + ": '" + jsonWrapper + "'");
-          }
-        }
-        writer.write(jsonWrapper + "(");
-        writer.write(getJSONWrap(stringWriter.toString()));
-        writer.write(')');
-      } else {  // using a layout, but not JSON wrapping
-        writer.write(stringWriter.toString());
-      }
-    }
-  }
-
-  @SuppressWarnings({"unchecked"})
-  private VelocityContext createContext(SolrQueryRequest request, SolrQueryResponse response) {
-    VelocityContext context = new VelocityContext();
-
-    // Register useful Velocity "tools"
-    String locale = request.getParams().get(LOCALE);
-    @SuppressWarnings({"rawtypes"})
-    Map toolConfig = new HashMap();
-    toolConfig.put("locale", locale);
-
-
-    context.put("log", log);   // TODO: add test; TODO: should this be overridable with a custom "log" named tool?
-    context.put("esc", new EscapeTool());
-    context.put("date", new ComparisonDateTool());
-    context.put(SORT, new CollectionTool());
-
-    MathTool mathTool = new MathTool();
-    mathTool.configure(toolConfig);
-    context.put("math", mathTool);
-
-    NumberTool numberTool = new NumberTool();
-    numberTool.configure(toolConfig);
-    context.put("number", numberTool);
-
-
-    DisplayTool displayTool = new DisplayTool();
-    displayTool.configure(toolConfig);
-    context.put("display", displayTool);
-
-    ResourceTool resourceTool = new SolrVelocityResourceTool(request.getCore().getSolrConfig().getResourceLoader().getClassLoader());
-    resourceTool.configure(toolConfig);
-    context.put("resource", resourceTool);
-
-    if (request.getCore().getCoreDescriptor().isConfigSetTrusted()) {
-      // Load custom tools, only if in a trusted configset
-
-      /*
-          // Custom tools, specified in config as:
-              <queryResponseWriter name="velocityWithCustomTools" class="solr.VelocityResponseWriter">
-                <lst name="tools">
-                  <str name="mytool">com.example.solr.velocity.MyTool</str>
-                </lst>
-              </queryResponseWriter>
-      */
-      // Custom tools can override any of the built-in tools provided above, by registering one with the same name
-      if (request.getCore().getCoreDescriptor().isConfigSetTrusted()) {
-        for (Map.Entry<String, String> entry : customTools.entrySet()) {
-          String name = entry.getKey();
-          // TODO: at least log a warning when one of the *fixed* tools classes is same name with a custom one, currently silently ignored
-          Object customTool = SolrCore.createInstance(entry.getValue(), Object.class, "VrW custom tool: " + name, request.getCore(), request.getCore().getResourceLoader());
-          if (customTool instanceof LocaleConfig) {
-            ((LocaleConfig) customTool).configure(toolConfig);
-          }
-          context.put(name, customTool);
-        }
-      }
-
-      // custom tools _cannot_ override context objects added below, like $request and $response
-    }
-
-
-    // Turn the SolrQueryResponse into a SolrResponse.
-    // QueryResponse has lots of conveniences suitable for a view
-    // Problem is, which SolrResponse class to use?
-    // One patch to SOLR-620 solved this by passing in a class name as
-    // as a parameter and using reflection and Solr's class loader to
-    // create a new instance.  But for now the implementation simply
-    // uses QueryResponse, and if it chokes in a known way, fall back
-    // to bare bones SolrResponseBase.
-    // Can this writer know what the handler class is?  With echoHandler=true it can get its string name at least
-    SolrResponse rsp = new QueryResponse();
-    NamedList<Object> parsedResponse = BinaryResponseWriter.getParsedResponse(request, response);
-    try {
-      rsp.setResponse(parsedResponse);
-
-      // page only injected if QueryResponse works
-      context.put("page", new PageTool(request, response));  // page tool only makes sense for a SearchHandler request
-      context.put("debug",((QueryResponse)rsp).getDebugMap());
-    } catch (ClassCastException e) {
-      // known edge case where QueryResponse's extraction assumes "response" is a SolrDocumentList
-      // (AnalysisRequestHandler emits a "response")
-      rsp = new SolrResponseBase();
-      rsp.setResponse(parsedResponse);
-    }
-
-    context.put("request", request);
-    context.put("response", rsp);
-
-    return context;
-  }
-
-  private VelocityEngine createEngine(SolrQueryRequest request) {
-
-    boolean trustedMode = request.getCore().getCoreDescriptor().isConfigSetTrusted();
-
-
-    VelocityEngine engine = new VelocityEngine();
-
-    // load the built-in _macros.vm first, then load VM_global_library.vm for legacy (pre-5.0) support,
-    // and finally allow macros.vm to have the final say and override anything defined in the preceding files.
-    engine.setProperty(RuntimeConstants.VM_LIBRARY, "_macros.vm,VM_global_library.vm,macros.vm");
-
-    // Standard templates autoload, but not the macro one(s), by default, so let's just make life
-    // easier, and consistent, for macro development too.
-    engine.setProperty(RuntimeConstants.VM_LIBRARY_AUTORELOAD, "true");
-
-    /*
-      Set up Velocity resource loader(s)
-       terminology note: "resource loader" is overloaded here, there is Solr's resource loader facility for plugins,
-       and there are Velocity template resource loaders.  It's confusing, they overlap: there is a Velocity resource
-       loader that loads templates from Solr's resource loader (SolrVelocityResourceLoader).
-
-      The Velocity resource loader order is `[file,][solr],builtin` intentionally ordered in this manner.
-      The "file" resource loader, enabled when the configset is trusted and `template.base.dir` is specified as a
-      response writer init property.
-
-      The "solr" resource loader, enabled when the configset is trusted, and provides templates from a velocity/
-      sub-tree in either the classpath or under conf/.
-
-      By default, only "builtin" resource loader is enabled, providing tenplates from builtin Solr .jar files.
-
-      The basic browse templates are built into
-      this plugin, but can be individually overridden by placing a same-named template in the template.base.dir specified
-      directory, or within a trusted configset's velocity/ directory.
-     */
-    ArrayList<String> loaders = new ArrayList<String>();
-    if ((fileResourceLoaderBaseDir != null) && trustedMode) {
-      loaders.add("file");
-      engine.setProperty(RuntimeConstants.FILE_RESOURCE_LOADER_PATH, fileResourceLoaderBaseDir.getAbsolutePath());
-    }
-    if (trustedMode) {
-      // The solr resource loader serves templates under a velocity/ subtree from <lib>, conf/,
-      // or SolrCloud's configuration tree.  Or rather the other way around, other resource loaders are rooted
-      // from the top, whereas this is velocity/ sub-tree rooted.
-      loaders.add("solr");
-      engine.setProperty("solr.resource.loader.instance", new SolrVelocityResourceLoader(request.getCore().getSolrConfig().getResourceLoader()));
-    }
-
-    // Always have the built-in classpath loader.  This is needed when using VM_LIBRARY macros, as they are required
-    // to be present if specified, and we want to have a nice macros facility built-in for users to use easily, and to
-    // extend in custom ways.
-    loaders.add("builtin");
-    engine.setProperty("builtin.resource.loader.instance", new ClasspathResourceLoader());
-
-    engine.setProperty(RuntimeConstants.RESOURCE_LOADER, String.join(",", loaders));
-
-
-    engine.setProperty(RuntimeConstants.INPUT_ENCODING, "UTF-8");
-    engine.setProperty(RuntimeConstants.SPACE_GOBBLING, RuntimeConstants.SpaceGobbling.LINES.toString());
-
-    // install a class/package restricting uberspector
-    engine.setProperty(RuntimeConstants.UBERSPECT_CLASSNAME,"org.apache.velocity.util.introspection.SecureUberspector");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_PACKAGES,"java.lang.reflect");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Class");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.ClassLoader");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Compiler");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.InheritableThreadLocal");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Package");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Process");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Runtime");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.RuntimePermission");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.SecurityManager");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.System");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.Thread");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.ThreadGroup");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"java.lang.ThreadLocal");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"org.apache.solr.core.SolrResourceLoader");
-    engine.addProperty(RuntimeConstants.INTROSPECTOR_RESTRICT_CLASSES,"org.apache.solr.core.CoreContainer");
-
-    if (trustedMode) {
-      // Work around VELOCITY-908 with Velocity not handling locales properly
-      Object spaceGobblingInitProperty = velocityInitProps.get(RuntimeConstants.SPACE_GOBBLING);
-      if (spaceGobblingInitProperty != null) {
-        // If there is an init property, uppercase it before Velocity.
-        velocityInitProps.put(RuntimeConstants.SPACE_GOBBLING,
-            String.valueOf(spaceGobblingInitProperty).toUpperCase(Locale.ROOT));
-      }
-      // bring in any custom properties too
-      engine.setProperties(velocityInitProps);
-    }
-
-    engine.init();
-
-    return engine;
-  }
-
-  private Template getTemplate(VelocityEngine engine, SolrQueryRequest request) throws IOException {
-    Template template;
-
-    String templateName = request.getParams().get(TEMPLATE);
-
-    String qt = request.getParams().get(CommonParams.QT);
-    String path = (String) request.getContext().get("path");
-    if (templateName == null && path != null) {
-      templateName = path;
-    }  // TODO: path is never null, so qt won't get picked up  maybe special case for '/select' to use qt, otherwise use path?
-    if (templateName == null && qt != null) {
-      templateName = qt;
-    }
-    if (templateName == null) templateName = "index";
-    try {
-      template = engine.getTemplate(templateName + TEMPLATE_EXTENSION);
-    } catch (Exception e) {
-      throw new IOException(e.getMessage());
-    }
-
-    return template;
-  }
-
-  private String getJSONWrap(String xmlResult) {  // maybe noggit or Solr's JSON utilities can make this cleaner?
-    // escape the double quotes and backslashes
-    String replace1 = xmlResult.replaceAll("\\\\", "\\\\\\\\");
-    replace1 = replace1.replaceAll("\\n", "\\\\n");
-    replace1 = replace1.replaceAll("\\r", "\\\\r");
-    String replaced = replace1.replaceAll("\"", "\\\\\"");
-    // wrap it in a JSON object
-    return "{\"result\":\"" + replaced + "\"}";
-  }
-
-  // see: https://github.com/apache/velocity-tools/blob/trunk/velocity-tools-generic/src/main/java/org/apache/velocity/tools/generic/ResourceTool.java
-  private static class SolrVelocityResourceTool extends ResourceTool {
-
-    private ClassLoader solrClassLoader;
-
-    public SolrVelocityResourceTool(ClassLoader cl) {
-      this.solrClassLoader = cl;
-    }
-
-    @Override
-    protected ResourceBundle getBundle(String baseName, Object loc) {
-      // resource bundles for this tool must be in velocity "package"
-      return ResourceBundle.getBundle(
-          "velocity." + baseName,
-          (loc == null) ? this.getLocale() : this.toLocale(loc),
-          solrClassLoader);
-    }
-  }
-}
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/package.html b/solr/contrib/velocity/src/java/org/apache/solr/response/package.html
deleted file mode 100644
index b36606c..0000000
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/package.html
+++ /dev/null
@@ -1,23 +0,0 @@
-<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!-- not a package-info.java, because we already defined this package in core/ -->
-<html>
-<body>
-{@link org.apache.solr.response.VelocityResponseWriter} and related code.
-</body>
-</html>
diff --git a/solr/contrib/velocity/src/java/overview.html b/solr/contrib/velocity/src/java/overview.html
deleted file mode 100644
index 2168639..0000000
--- a/solr/contrib/velocity/src/java/overview.html
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<html>
-<body>
-Apache Solr Search Server: Velocity Response Writer contrib
-</body>
-</html>
diff --git a/solr/contrib/velocity/src/resources/VM_global_library.vm b/solr/contrib/velocity/src/resources/VM_global_library.vm
deleted file mode 100644
index fd3699b..0000000
--- a/solr/contrib/velocity/src/resources/VM_global_library.vm
+++ /dev/null
@@ -1,4 +0,0 @@
-## legacy support, the writer will load this as a macro library as it used to
-## but best not to have your own file called VM_global_library.vm; put them in macros.vm instead.
-## This file is needed for the "builtin" resource loader as Velocity requires all macro library files exist, but
-## we don't want users to have to have a macro library file in their template directories.
diff --git a/solr/contrib/velocity/src/resources/_macros.vm b/solr/contrib/velocity/src/resources/_macros.vm
deleted file mode 100644
index 406c4a9..0000000
--- a/solr/contrib/velocity/src/resources/_macros.vm
+++ /dev/null
@@ -1,70 +0,0 @@
-#macro(param $key)$request.params.get($key)#end
-
-#macro(url_root)/solr#end
-
-#macro(core_name)$request.core.name#end
-#macro(url_for_solr)#{url_root}#if($request.core.name != "")/$request.core.name#end#end
-#macro(url_for_home)#url_for_solr$request.context.path#end
-
-#macro(q)&q=$!{esc.url($request.params.get('q'))}#end
-
-#macro(fqs $p)#foreach($fq in $p)#if($velocityCount>1)&#{end}fq=$esc.url($fq)#end#end
-
-#macro(debug)#if($debug)&debug=true#end#end
-
-#macro(sort $p)#if($p)#foreach($s in $p)&sort=$esc.url($s)#end#end#end
-
-#macro(lensNoQ)?#if($request.params.getParams('fq') and $request.params.getParams('fq').size() > 0)&#fqs($request.params.getParams('fq'))#end#sort($request.params.getParams('sort'))#debug#end
-#macro(lens)#lensNoQ#q#end
-
-#macro(url_for_lens)#{url_for_home}#lens#end
-
-#macro(url_for_start $start)#url_for_home#lens&start=$start#end
-
-#macro(url_for_filters $p)#url_for_home?#q#if($p.size() > 0)&#fqs($p)#end#debug#end
-
-#macro(url_for_nested_facet_query $field)#url_for_home#lens&fq=$esc.url($field)#end
-
-#macro(url_for_facet_filter $field $value)#url_for_home#lens&fq=#if($value!=$null)$esc.url($field):%22$esc.url($value)%22#else-$esc.url($field):[*+TO+*]#end#end
-
-#macro(url_for_facet_date_filter $field $value)#url_for_home#lens&fq=$esc.url($field):$esc.url($value)#end
-
-#macro(url_for_facet_range_filter $field $value)#url_for_home#lens&fq=$esc.url($field):$esc.url($value)#end
-
-# TODO: make this parameterized fully, no context sensitivity
-#macro(field $f)
-  #if($response.response.highlighting.get($docId).get($f).get(0))
-    #set($pad = "")
-      #foreach($v in $response.response.highlighting.get($docId).get($f))
-        $pad$v##  #TODO: $esc.html() or maybe make that optional?
-        #set($pad = " ... ")
-      #end
-  #else
-    $esc.html($display.list($doc.getFieldValues($f), ", "))
-  #end
-#end
-
-#macro(link_to_previous_page)
-  #if($page.current_page_number > 1)
-    #set($prev_start = $page.start - $page.results_per_page)
-    <a class="prev-page" href="#url_for_start($prev_start)">$resource.previous</a>
-  #end
-#end
-
-#macro(link_to_next_page)
-  #if($page.current_page_number < $page.page_count)
-    #set($next_start = $page.start + $page.results_per_page)
-    <a class="next-page" href="#url_for_start($next_start)">$resource.next</a>
-  #end
-#end
-
-#macro(link_to_page $page_number $text)
-  #if($page_number == $page.current_page_number)
-    $text
-  #else
-    #if($page_number <= $page.page_count)
-      #set($page_start = $page_number * $page.results_per_page - $page.results_per_page)
-      <a class="page" href="#url_for_start($page_start)">$text</a>
-    #end
-  #end
-#end
diff --git a/solr/contrib/velocity/src/resources/macros.vm b/solr/contrib/velocity/src/resources/macros.vm
deleted file mode 100644
index 0018c5f..0000000
--- a/solr/contrib/velocity/src/resources/macros.vm
+++ /dev/null
@@ -1,3 +0,0 @@
-## placeholder for users to specify their own macros
-## This file is needed for the "builtin" resource loader as Velocity requires all macro library files exist.
-## This is the file we want users to override to add their own macros.
diff --git a/solr/contrib/velocity/src/resources/velocity/_default.vm b/solr/contrib/velocity/src/resources/velocity/_default.vm
deleted file mode 100644
index 71cac96..0000000
--- a/solr/contrib/velocity/src/resources/velocity/_default.vm
+++ /dev/null
@@ -1,14 +0,0 @@
-<h1>Request</h1>
-<ul>
-  <li>context.path = $request.context.path</li>
-  <li>params.qt = $!request.params.qt</li>
-</ul>
-
-<pre>
-  $esc.html($request)
-</pre>
-
-<h1>Response</h1>
-<pre>
-  $esc.html($response)
-</pre>
diff --git a/solr/contrib/velocity/src/resources/velocity/browse.vm b/solr/contrib/velocity/src/resources/velocity/browse.vm
deleted file mode 100644
index b651295..0000000
--- a/solr/contrib/velocity/src/resources/velocity/browse.vm
+++ /dev/null
@@ -1,73 +0,0 @@
-#*
-  - Make search box bigger
-  - Add in pivot and other facets?
-  - Work on template default selection logic
-*#
-
-## Show Error Message, if any
-<div class="error">
-  #parse("error.vm")
-</div>
-
-<div class="query-box">
-  <form id="query-form" action="#{url_for_home}" method="GET">
-    $resource.find:
-    <input type="text" id="q" name="q" value="$!esc.html($request.params.get('q'))"/>
-    <input type="submit"/>
-
-    #if($debug) ## TODO: this would automatically happen when arbitrary parameters are kept on URLs
-      <input type="hidden" name="debug" value="true"/>
-    #end
-    #foreach($fq in $request.params.getParams('fq'))
-      <input type="hidden" name="fq" id="allFQs" value="$esc.html($fq)"/>
-    #end
-
-    <div class="constraints">
-      #foreach($fq in $request.params.getParams('fq'))
-        #set($previous_fq_count=$velocityCount - 1)
-        #if($fq != '')
-          &gt;
-          <a href="#url_for_filters($request.params.getParams('fq').subList(0,$previous_fq_count))">$fq</a>
-        #end
-      #end
-    </div>
-
-    <div class="parsed_query_header">
-      #if($debug)
-        <a href="#" onclick='jQuery(this).siblings("div").toggle(); return false;'>toggle parsed query</a>
-        <div class="parsed_query" style="display:none">$response.response.debug.parsedquery</div>
-      #end
-    </div>
-
-  </form>
-</div>
-
-<div class="facets">
-  #parse("facets.vm")
-</div>
-
-<div class="pagination">
-  <span>
-    <span class="results-found">$page.results_found</span>
-    results found in
-    ${response.responseHeader.QTime}ms
-  </span>
-
-  $resource.page_of.insert($page.current_page_number,$page.page_count)
-</div>
-
-## Render Results, actual matching docs
-<div class="results">
-  #parse("results_list.vm")
-</div>
-
-<div class="pagination">
-  #link_to_previous_page
-
-  <span class="results-found">$page.results_found</span>
-  results found.
-
-  $resource.page_of.insert($page.current_page_number,$page.page_count)
-
-  #link_to_next_page
-</div>
diff --git a/solr/contrib/velocity/src/resources/velocity/error.vm b/solr/contrib/velocity/src/resources/velocity/error.vm
deleted file mode 100644
index 1c4d1dc..0000000
--- a/solr/contrib/velocity/src/resources/velocity/error.vm
+++ /dev/null
@@ -1,4 +0,0 @@
-#if( $response.response.error.code )
-  <h1>ERROR $response.response.error.code</h1>
-  $response.response.error.msg
-#end
diff --git a/solr/contrib/velocity/src/resources/velocity/facets.vm b/solr/contrib/velocity/src/resources/velocity/facets.vm
deleted file mode 100644
index e832704..0000000
--- a/solr/contrib/velocity/src/resources/velocity/facets.vm
+++ /dev/null
@@ -1,23 +0,0 @@
-#**
- *  Display facets based on field values
- *  e.g.: fields specified by &facet.field=
- *#
-
-#if($response.facetFields.size() > 0)
-  <h2>Field Facets</h2>
-
-  #foreach($field in $response.facetFields)
-    ## Hide facets without value
-    #if($field.values.size() > 0)
-      <span class="facet-field">$field.name</span>
-      <ul>
-        #foreach($facet in $field.values)
-          <li>
-            <a href="#url_for_facet_filter($field.name, $facet.name)" title="$esc.html($facet.name)">
-              #if($facet.name!=$null)$esc.html($display.truncate($facet.name,20))#else<em>missing</em>#end</a> ($facet.count)
-          </li>
-        #end
-      </ul>
-    #end  ## end if > 0
-  #end    ## end for each facet field
-#end      ## end if response has facet fields
diff --git a/solr/contrib/velocity/src/resources/velocity/footer.vm b/solr/contrib/velocity/src/resources/velocity/footer.vm
deleted file mode 100644
index be9c687..0000000
--- a/solr/contrib/velocity/src/resources/velocity/footer.vm
+++ /dev/null
@@ -1,19 +0,0 @@
-<hr/>
-<div>
-  <span>Options:</span>
-
-  #if($debug)
-    <a href="#url_for_home?#q#if($request.params.getParams('fq').size() > 0)&#fqs($request.params.getParams('fq'))#end">
-      disable debug</a>
-  #else
-    <a href="#url_for_lens&debug=true&fl=*,score">enable debug</a>
-  #end
-  -
-  <a href="#url_for_lens&wt=xml#if($debug)&debug=true#end">XML results</a> ## TODO: Add links for other formats, maybe dynamically?
-
-</div>
-
-<div>
-  <a href="http://lucene.apache.org/solr">Solr Home Page</a>
-</div>
-
diff --git a/solr/contrib/velocity/src/resources/velocity/head.vm b/solr/contrib/velocity/src/resources/velocity/head.vm
deleted file mode 100644
index dbcfb31..0000000
--- a/solr/contrib/velocity/src/resources/velocity/head.vm
+++ /dev/null
@@ -1,185 +0,0 @@
-#**
- *  Provide elements for the <head> section of the HTML document
- *#
-
-  <title>Solr browse: #core_name</title>
-
-  <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
-
-  <link rel="icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-  <link rel="shortcut icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-
-  <script type="text/javascript" src="#{url_root}/libs/jquery-3.4.1.min.js"></script>
-
-  <style>
-    #admin{
-      text-align: right;
-      vertical-align: top;
-    }
-
-    #head{
-      width: 100%;
-    }
-
-    .parsed_query_header {
-      font-family: Helvetica, Arial, sans-serif;
-      font-size: 10pt;
-      font-weight: bold;
-    }
-
-    .parsed_query {
-      font-family: Courier, Courier New, monospaced;
-      font-size: 10pt;
-      font-weight: normal;
-    }
-
-    body {
-      font-family: Helvetica, Arial, sans-serif;
-      font-size: 10pt;
-    }
-
-    a {
-      color: #305CB3;
-    }
-
-    em {
-      color: #FF833D;
-    }
-
-    .facets {
-      float: left;
-      margin: 5px;
-      margin-top: 0px;
-      width: 185px;
-      padding: 5px;
-      top: -20px;
-      position: relative;
-    }
-
-    .facets h2 {
-      background: #D9411E;
-      padding: 2px 5px;
-    }
-
-    .facets ul {
-      list-style: none;
-      margin: 0;
-      margin-bottom: 5px;
-      margin-top: 5px;
-      padding-left: 10px;
-    }
-
-    .facets ul li {
-      color: #999;
-      padding: 2px;
-    }
-
-    .facet-field {
-      font-weight: bold;
-    }
-
-    .field-name {
-      font-weight: bold;
-      // align="right" valign="top"
-    }
-
-    .highlighted-facet-field {
-      background: white;
-    }
-
-    .constraints {
-      margin-top: 10px;
-    }
-
-    #query-form{
-      width: 80%;
-    }
-
-    .query-box, .constraints {
-      padding: 5px;
-      margin: 5px;
-      font-weight: normal;
-      font-size: 24px;
-      letter-spacing: 0.08em;
-    }
-
-    .query-box #q {
-      margin-left: 8px;
-      width: 60%;
-      height: 50px;
-      border: 1px solid #999;
-      font-size: 1em;
-      padding: 0.4em;
-    }
-
-    .query-box {
-
-    }
-
-    .query-box .inputs{
-      left: 180px;
-      position: relative;
-
-    }
-
-    #logo {
-      width: 115px;
-      margin: 0px 0px 20px 12px;
-      border-style: none;
-    }
-
-    .pagination {
-      padding-left: 33%;
-      background: #eee;
-      margin: 5px;
-      margin-left: 210px;
-      padding-top: 5px;
-      padding-bottom: 5px;
-    }
-
-    .result-document {
-      border: 1px solid #999;
-      padding: 5px;
-      margin: 5px;
-      margin-left: 210px;
-      margin-bottom: 15px;
-    }
-
-    .result-document div{
-      padding: 5px;
-    }
-
-    .result-title{
-      width:60%;
-    }
-
-    .result-body{
-      background: #ddd;
-    }
-
-    .mlt{
-
-    }
-
-    .result-document:nth-child(2n+1) {
-      background-color: #eee;
-    }
-
-
-    .selected-facet-field {
-      font-weight: bold;
-    }
-
-    li.show {
-      list-style: disc;
-    }
-
-    .error {
-      color: white;
-      background-color: red;
-      left: 210px;
-      width:80%;
-      position: relative;
-
-    }
-  </style>
diff --git a/solr/contrib/velocity/src/resources/velocity/hit.vm b/solr/contrib/velocity/src/resources/velocity/hit.vm
deleted file mode 100644
index ec4dfd8..0000000
--- a/solr/contrib/velocity/src/resources/velocity/hit.vm
+++ /dev/null
@@ -1,27 +0,0 @@
-#set($docId = $doc.getFirstValue($request.schema.uniqueKeyField.name))
-
-
-<div class="result-document">
-
-  <table>
-    #foreach( $fieldName in $doc.fieldNames )
-        <tr>
-          <th align="right" valign="top" style="field-name">
-            $esc.html($fieldName):
-          </th>
-
-          <td align="left" valign="top">
-            #field($fieldName)
-          </td>
-        </tr>
-    #end
-  </table>
-
-  #if($debug)
-    <a href="#" onclick='jQuery(this).siblings("pre").toggle(); return false;'>toggle explain</a>
-
-    <pre style="display:none">
-      $response.getExplainMap().get($docId)
-    </pre>
-  #end
-</div>
diff --git a/solr/contrib/velocity/src/resources/velocity/layout.vm b/solr/contrib/velocity/src/resources/velocity/layout.vm
deleted file mode 100644
index aa68ffc..0000000
--- a/solr/contrib/velocity/src/resources/velocity/layout.vm
+++ /dev/null
@@ -1,19 +0,0 @@
-<html>
-<head>
-  #parse("head.vm")
-</head>
-  <body>
-    <div id="admin"><a href="#url_root/#/#core_name">Solr Admin</a></div>
-    <div id="head">
-      <a href="#url_for_home#if($debug)?debug=true#end"><img src="#{url_root}/img/solr.svg" id="logo"/></a>
-    </div>
-
-    <div id="content">
-      $content
-    </div>
-
-    <div id="footer">
-      #parse("footer.vm")
-    </div>
-  </body>
-</html>
diff --git a/solr/contrib/velocity/src/resources/velocity/resources.properties b/solr/contrib/velocity/src/resources/velocity/resources.properties
deleted file mode 100644
index dff221c..0000000
--- a/solr/contrib/velocity/src/resources/velocity/resources.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-find=Find
-page_of=Page <span class="page-num">{0}</span> of <span class="page-count">{1}</span>
-previous=previous
-next=next
-
-
diff --git a/solr/contrib/velocity/src/resources/velocity/results_list.vm b/solr/contrib/velocity/src/resources/velocity/results_list.vm
deleted file mode 100644
index 91ae6ea..0000000
--- a/solr/contrib/velocity/src/resources/velocity/results_list.vm
+++ /dev/null
@@ -1,3 +0,0 @@
-#foreach($doc in $response.results)
-  #parse("hit.vm")
-#end
diff --git a/solr/contrib/velocity/src/test-files/velocity/file.vm b/solr/contrib/velocity/src/test-files/velocity/file.vm
deleted file mode 100644
index 9a2c773..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/file.vm
+++ /dev/null
@@ -1 +0,0 @@
-testing
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/schema.xml b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/schema.xml
deleted file mode 100644
index 5b12cda..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/schema.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<schema name="minimal-velocity" version="1.6">
-  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
-
-  <field name="id" type="string" indexed="true" stored="true" required="true"/>
-  <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
-
-  <uniqueKey>id</uniqueKey>
-</schema>
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml
deleted file mode 100644
index 35ce52b..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<config>
-  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
-
-  <!--<lib dir="../../contrib/velocity/lib" />-->
-  <!--<lib dir="../../dist/" regex="solr-velocity-\d.*\.jar" />-->
-
-  <schemaFactory class="ClassicIndexSchemaFactory"/>
-
-  <requestHandler name="search" class="solr.SearchHandler" default="true">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <int name="rows">10</int>
-     </lst>
-    </requestHandler>
-
-  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter"/>
-
-  <queryResponseWriter name="velocityWithInitProps" class="solr.VelocityResponseWriter">
-    <str name="init.properties.file">velocity-init.properties</str>
-  </queryResponseWriter>
-
-  <queryResponseWriter name="velocityWithCustomTools" class="solr.VelocityResponseWriter">
-    <lst name="tools">
-      <!-- how someone would typically add a custom tool, with a custom, non-clashing name -->
-      <str name="mytool">org.apache.solr.velocity.MockTool</str>
-
-      <!-- override the $log context object -->
-      <str name="log">org.apache.solr.velocity.MockTool</str>
-
-      <!-- Try to override response, but ignored -->
-      <str name="response">org.apache.solr.velocity.MockTool</str>
-    </lst>
-  </queryResponseWriter>
-</config>
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity-init.properties b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity-init.properties
deleted file mode 100644
index 853e5fc..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity-init.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-foreach.provide.scope.control=false
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/VM_global_library.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/VM_global_library.vm
deleted file mode 100644
index 7bd767e..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/VM_global_library.vm
+++ /dev/null
@@ -1,3 +0,0 @@
-#macro(legacy_macro)legacy_macro_SUCCESS#end
-
-#macro(url_root)Loaded from: VM_global_library.vm#end
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/encoding.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/encoding.vm
deleted file mode 100644
index 419f3f2..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/encoding.vm
+++ /dev/null
@@ -1 +0,0 @@
-éñçø∂îñg
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/layout.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/layout.vm
deleted file mode 100644
index 39136e1..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/layout.vm
+++ /dev/null
@@ -1 +0,0 @@
-{{{$content}}}
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale.vm
deleted file mode 100644
index a85fd05..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale.vm
+++ /dev/null
@@ -1 +0,0 @@
-$resource.color
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale_number.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale_number.vm
deleted file mode 100644
index 9994022..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/locale_number.vm
+++ /dev/null
@@ -1 +0,0 @@
-$number.format(2112)
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/macros.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/macros.vm
deleted file mode 100644
index 46a508f..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/macros.vm
+++ /dev/null
@@ -1,3 +0,0 @@
-#macro(test_macro)test_macro_SUCCESS#end
-
-#macro(url_root)Loaded from: macros.vm#end
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/numFound.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/numFound.vm
deleted file mode 100644
index 7bafdcd..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/numFound.vm
+++ /dev/null
@@ -1 +0,0 @@
-$response.response.response.numFound
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/outside_the_box.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/outside_the_box.vm
deleted file mode 100644
index c52c94b..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/outside_the_box.vm
+++ /dev/null
@@ -1,4 +0,0 @@
-#set($x='')
-#set($sys=$x.class.forName('java.lang.System'))
-#set($ex=$sys.getProperty('os.name'))
-$ex
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/resource_get.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/resource_get.vm
deleted file mode 100644
index 8a4890f..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/resource_get.vm
+++ /dev/null
@@ -1 +0,0 @@
-$resource.get("color","resources","en_UK")
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/sandbox_intersection.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/sandbox_intersection.vm
deleted file mode 100644
index 80c7422..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/sandbox_intersection.vm
+++ /dev/null
@@ -1,5 +0,0 @@
-#set($x='')
-#set($sys=$x.class.forName('java.nio.file.Paths'))
-#set($path=$sys.get('/dumbass/denied_location'))
-#set($ex=$path.resolve($path).toRealPath())
-$ex
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_legacy_support.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_legacy_support.vm
deleted file mode 100644
index 30f32fe..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_legacy_support.vm
+++ /dev/null
@@ -1 +0,0 @@
-#legacy_macro
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_overridden.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_overridden.vm
deleted file mode 100644
index f06b28f..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_overridden.vm
+++ /dev/null
@@ -1 +0,0 @@
-#url_root
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_visible.vm b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_visible.vm
deleted file mode 100644
index 7a5baed..0000000
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/velocity/test_macro_visible.vm
+++ /dev/null
@@ -1 +0,0 @@
-#test_macro
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test/custom_tool.vm b/solr/contrib/velocity/src/test/custom_tool.vm
deleted file mode 100644
index 3efff7d..0000000
--- a/solr/contrib/velocity/src/test/custom_tool.vm
+++ /dev/null
@@ -1,19 +0,0 @@
-#* Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License. *#
-
-mytool.star=$!mytool.star("LATERALUS")
-mytool.locale=$!mytool.locale
-log.star=$!log.star("log overridden")
-response.star=$!response.star("response overridden??")
diff --git a/solr/contrib/velocity/src/test/foreach.vm b/solr/contrib/velocity/src/test/foreach.vm
deleted file mode 100644
index 5d7d1e9..0000000
--- a/solr/contrib/velocity/src/test/foreach.vm
+++ /dev/null
@@ -1,14 +0,0 @@
-#* Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License. *##foreach($x in ["a","b"])$!foreach.index#end
diff --git a/solr/contrib/velocity/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java b/solr/contrib/velocity/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java
deleted file mode 100644
index 1aa3dfa..0000000
--- a/solr/contrib/velocity/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.velocity;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.security.AccessControlException;
-import java.util.Properties;
-
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.response.SolrQueryResponse;
-import org.apache.solr.response.VelocityResponseWriter;
-import org.apache.velocity.exception.MethodInvocationException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-public class VelocityResponseWriterTest extends SolrTestCaseJ4 {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig.xml", "schema.xml", getFile("velocity/solr").getAbsolutePath());
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    // This test case toggles the configset used from trusted to untrusted - return to default of trusted for each test
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true);
-    super.setUp();
-  }
-
-  @Test
-  public void testVelocityResponseWriterRegistered() {
-    QueryResponseWriter writer = h.getCore().getQueryResponseWriter("velocity");
-    assertTrue("VrW registered check", writer instanceof VelocityResponseWriter);
-  }
-
-  @Test
-  public void testSecureUberspector() throws Exception {
-    VelocityResponseWriter vrw = new VelocityResponseWriter();
-    NamedList<String> nl = new NamedList<>();
-    nl.add("template.base.dir", getFile("velocity").getAbsolutePath());
-    vrw.init(nl);
-    SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"outside_the_box");
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    StringWriter buf = new StringWriter();
-    vrw.write(buf, req, rsp);
-    assertEquals("$ex",buf.toString());  // $ex rendered literally because it is null, and thus did not succeed to break outside the box
-  }
-
-  @Test
-  @Ignore("SOLR-14025: Velocity's SecureUberspector addresses this")
-  public void testTemplateSandbox() throws Exception {
-    assumeTrue("This test only works with security manager", System.getSecurityManager() != null);
-    VelocityResponseWriter vrw = new VelocityResponseWriter();
-    NamedList<String> nl = new NamedList<>();
-    nl.add("template.base.dir", getFile("velocity").getAbsolutePath());
-    vrw.init(nl);
-    SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"outside_the_box");
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    StringWriter buf = new StringWriter();
-    try {
-      vrw.write(buf, req, rsp);
-      fail("template broke outside the box, retrieved: " + buf);
-    } catch (MethodInvocationException e) {
-      assertNotNull(e.getCause());
-      assertEquals(AccessControlException.class, e.getCause().getClass());
-      // expected failure, can't get outside the box
-    }
-  }
-
-  @Test
-  @Ignore("SOLR-14025: Velocity's SecureUberspector addresses this")
-  public void testSandboxIntersection() throws Exception {
-    assumeTrue("This test only works with security manager", System.getSecurityManager() != null);
-    VelocityResponseWriter vrw = new VelocityResponseWriter();
-    NamedList<String> nl = new NamedList<>();
-    nl.add("template.base.dir", getFile("velocity").getAbsolutePath());
-    vrw.init(nl);
-    SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"sandbox_intersection");
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    StringWriter buf = new StringWriter();
-    try {
-      vrw.write(buf, req, rsp);
-      fail("template broke outside the box, retrieved: " + buf);
-    } catch (MethodInvocationException e) {
-      assertNotNull(e.getCause());
-      assertEquals(AccessControlException.class, e.getCause().getClass());
-      // expected failure, can't get outside the box
-    }
-  }
-
-  @Test
-  public void testFileResourceLoader() throws Exception {
-    VelocityResponseWriter vrw = new VelocityResponseWriter();
-    NamedList<String> nl = new NamedList<>();
-    nl.add("template.base.dir", getFile("velocity").getAbsolutePath());
-    vrw.init(nl);
-    SolrQueryRequest req = req(VelocityResponseWriter.TEMPLATE,"file");
-    SolrQueryResponse rsp = new SolrQueryResponse();
-    StringWriter buf = new StringWriter();
-    vrw.write(buf, req, rsp);
-    assertEquals("testing", buf.toString());
-  }
-
-  @Test
-  public void testTemplateTrust() throws Exception {
-    // Try on trusted configset....
-    assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound")));
-
-    // Turn off trusted configset, which disables the Solr resource loader
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false);
-    assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted());
-
-    try {
-      assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound")));
-      fail("template rendering should have failed, from an untrusted configset");
-    } catch (IOException e) {
-      // expected exception
-      assertEquals(IOException.class, e.getClass());
-    }
-
-    // set the harness back to the default of trusted
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true);
-  }
-
-
-  @Test
-  public void testSolrResourceLoaderTemplate() throws Exception {
-    assertEquals("0", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"numFound")));
-  }
-
-  @Test
-  public void testEncoding() throws Exception {
-    assertEquals("éñçø∂îñg", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"encoding")));
-  }
-
-  @Test
-  public void testMacros() throws Exception {
-    // tests that a macro in a custom macros.vm is visible
-    assertEquals("test_macro_SUCCESS", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_visible")));
-
-    // tests that a builtin (_macros.vm) macro, #url_root in this case, can be overridden in a custom macros.vm
-    // the macro is also defined in VM_global_library.vm, which should also be overridden by macros.vm
-    assertEquals("Loaded from: macros.vm", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_overridden")));
-
-    // tests that macros defined in VM_global_library.vm are visible.  This file was where macros in pre-5.0 versions were defined
-    assertEquals("legacy_macro_SUCCESS", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"test_macro_legacy_support")));
-  }
-
-  @Test
-  public void testInitProps() throws Exception {
-    // The test init properties file turns off being able to use $foreach.index (the implicit loop counter)
-    // The foreach.vm template uses $!foreach.index, with ! suppressing the literal "$foreach.index" output
-
-    assertEquals("01", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"foreach")));
-    assertEquals("", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach")));
-
-    // Turn off trusted configset, which disables the init properties
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false);
-    assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted());
-
-    assertEquals("01", h.query(req("q","*:*", "wt","velocityWithInitProps",VelocityResponseWriter.TEMPLATE,"foreach")));
-
-    // set the harness back to the default of trusted
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true);
-  }
-
-  @Test
-  public void testCustomTools() throws Exception {
-    // Render this template once without a custom tool defined, and once with it defined.  The tool has a `.star` method.
-    // The tool added as `mytool`, `log`, and `response`.  `log` is designed to be overridable, but not `response`
-    //    mytool.star=$!mytool.star("LATERALUS")
-    //    mytool.locale=$!mytool.locale
-    //    log.star=$!log.star("log overridden")
-    //    response.star=$!response.star("response overridden??")
-
-    // First without the tool defined, with `$!` turning null object/method references into empty string
-    Properties rendered_props = new Properties();
-    String rsp = h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"custom_tool"));
-    rendered_props.load(new StringReader(rsp));
-    // ignore mytool.locale here, as it will be the random test one
-    assertEquals("",rendered_props.getProperty("mytool.star"));
-    assertEquals("",rendered_props.getProperty("log.star"));
-    assertEquals("",rendered_props.getProperty("response.star"));
-
-    // Now with custom tools defined:
-    rsp = h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"custom_tool",VelocityResponseWriter.LOCALE, "de_DE"));
-    rendered_props.clear();
-    rendered_props.load(new StringReader(rsp));
-    assertEquals("** LATERALUS **",rendered_props.getProperty("mytool.star"));
-    assertEquals("** log overridden **",rendered_props.getProperty("log.star"));
-    assertEquals("",rendered_props.getProperty("response.star"));
-    assertEquals("de_DE",rendered_props.getProperty("mytool.locale"));
-
-
-    // Turn off trusted configset, which disables the custom tool injection
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(false);
-    assertFalse(h.getCoreContainer().getCoreDescriptor(coreName).isConfigSetTrusted());
-
-    rsp = h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"custom_tool",VelocityResponseWriter.LOCALE, "de_DE"));
-    rendered_props.clear();
-    rendered_props.load(new StringReader(rsp));
-    assertEquals("",rendered_props.getProperty("mytool.star"));
-    assertEquals("",rendered_props.getProperty("log.star"));
-    assertEquals("",rendered_props.getProperty("response.star"));
-    assertEquals("",rendered_props.getProperty("mytool.locale"));
-
-    // set the harness back to the default of trusted
-    h.getCoreContainer().getCoreDescriptor(h.coreName).setConfigSetTrusted(true);
-
-
-    // Custom tools can also have a SolrCore-arg constructor because they are instantiated with SolrCore.createInstance
-    // TODO: do we really need to support this?  no great loss, as a custom tool could take a SolrCore object as a parameter to
-    // TODO: any method, so one could do $mytool.my_method($request.core)
-    // I'm currently inclined to make this feature undocumented/unsupported, as we may want to instantiate classes
-    // in a different manner that only supports no-arg constructors, commented (passing) test case out
-    //    assertEquals("collection1", h.query(req("q","*:*", "wt","velocityWithCustomTools",VelocityResponseWriter.TEMPLATE,"t",
-    //        SolrParamResourceLoader.TEMPLATE_PARAM_PREFIX+"t", "$mytool.core.name")))
-    //           - NOTE: example uses removed inline param; convert to external template as needed
-  }
-
-  @Test
-  public void testLocaleFeature() throws Exception {
-    assertEquals("Color", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "locale",
-        VelocityResponseWriter.LOCALE,"en_US")));
-    assertEquals("Colour", h.query(req("q", "*:*", "wt", "velocity", VelocityResponseWriter.TEMPLATE, "locale",
-        VelocityResponseWriter.LOCALE,"en_UK")));
-
-    // Test that $resource.get(key,baseName,locale) works with specified locale
-    assertEquals("Colour", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"resource_get")));
-
-    // Test that $number tool uses the specified locale
-    assertEquals("2,112", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"locale_number",
-        VelocityResponseWriter.LOCALE, "en_US")));
-    assertEquals("2.112", h.query(req("q","*:*", "wt","velocity",VelocityResponseWriter.TEMPLATE,"locale_number",
-        VelocityResponseWriter.LOCALE, "de_DE")));
-  }
-
-  @Test
-  public void testLayoutFeature() throws Exception {
-    assertEquals("{{{0}}}", h.query(req("q","*:*", "wt","velocity",
-        VelocityResponseWriter.TEMPLATE,"numFound", VelocityResponseWriter.LAYOUT,"layout")));
-
-    // even with v.layout specified, layout can be disabled explicitly
-    assertEquals("0", h.query(req("q","*:*", "wt","velocity",
-        VelocityResponseWriter.TEMPLATE,"numFound",
-        VelocityResponseWriter.LAYOUT,"layout",
-        VelocityResponseWriter.LAYOUT_ENABLED,"false")));
-  }
-
-  @Test
-  public void testJSONWrapper() throws Exception {
-    assertEquals("foo({\"result\":\"0\"})", h.query(req("q", "*:*", "wt", "velocity",
-        VelocityResponseWriter.TEMPLATE, "numFound",
-        VelocityResponseWriter.JSON,"foo")));
-
-    // Now with layout, for good measure
-    assertEquals("foo({\"result\":\"{{{0}}}\"})", h.query(req("q", "*:*", "wt", "velocity",
-        VelocityResponseWriter.TEMPLATE, "numFound",
-        VelocityResponseWriter.JSON,"foo",
-        VelocityResponseWriter.LAYOUT,"layout")));
-
-    assertQEx("Bad function name should throw exception", req("q", "*:*", "wt", "velocity",
-        VelocityResponseWriter.TEMPLATE, "numFound",
-        VelocityResponseWriter.JSON,"<foo>"), SolrException.ErrorCode.BAD_REQUEST
-    );
-  }
-
-  @Test
-  public void testContentType() {
-    VelocityResponseWriter vrw = new VelocityResponseWriter();
-    NamedList<String> nl = new NamedList<>();
-    vrw.init(nl);
-    SolrQueryResponse rsp = new SolrQueryResponse();
-
-    // with v.json=wrf, content type should default to application/json
-    assertEquals("application/json;charset=UTF-8",
-        vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound",
-            VelocityResponseWriter.JSON, "wrf"), rsp));
-
-    // with no v.json specified, the default text/html should be returned
-    assertEquals("text/html;charset=UTF-8",
-        vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound"), rsp));
-
-    // if v.contentType is specified, that should be used, even if v.json is specified
-    assertEquals("text/plain",
-        vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound",
-            VelocityResponseWriter.CONTENT_TYPE,"text/plain"), rsp));
-    assertEquals("text/plain",
-        vrw.getContentType(req(VelocityResponseWriter.TEMPLATE, "numFound",
-            VelocityResponseWriter.JSON,"wrf",
-            VelocityResponseWriter.CONTENT_TYPE,"text/plain"), rsp));
-  }
-}
diff --git a/solr/contrib/velocity/src/test/velocity/resources.properties b/solr/contrib/velocity/src/test/velocity/resources.properties
deleted file mode 100644
index ec6320f..0000000
--- a/solr/contrib/velocity/src/test/velocity/resources.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-color=Color
\ No newline at end of file
diff --git a/solr/contrib/velocity/src/test/velocity/resources_en_UK.properties b/solr/contrib/velocity/src/test/velocity/resources_en_UK.properties
deleted file mode 100644
index 21a476f..0000000
--- a/solr/contrib/velocity/src/test/velocity/resources_en_UK.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-color=Colour
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java b/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
index d9548c9..fd77413 100644
--- a/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
+++ b/solr/core/src/java/org/apache/solr/api/AnnotatedApi.java
@@ -37,7 +37,11 @@
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SpecProvider;
-import org.apache.solr.common.util.*;
+import org.apache.solr.common.util.CommandOperation;
+import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.JsonSchemaCreator;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.common.util.ValidatingJsonMap;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.security.AuthorizationContext;
@@ -267,6 +271,7 @@
 
     @SuppressWarnings({"unchecked"})
     void invoke(SolrQueryRequest req, SolrQueryResponse rsp, CommandOperation cmd) {
+      Object original = null;
       try {
         Object o = null;
         String commandName = null;
@@ -282,12 +287,13 @@
             }
           } else {
             commandName = cmd.name;
-            o = cmd.getCommandData();
+            original = cmd.getCommandData();
+            o = original;
             if (o instanceof Map && parameterClass != null && parameterClass != Map.class) {
               o = mapper.readValue(Utils.toJSONString(o), parameterClass);
             }
           }
-          PayloadObj<Object> payloadObj = new PayloadObj<>(commandName, o, o, req, rsp);
+          PayloadObj<Object> payloadObj = new PayloadObj<>(commandName, original, o, req, rsp);
           cmd = payloadObj;
           method.invoke(obj, payloadObj);
           checkForErrorInPayload(cmd);
diff --git a/solr/core/src/java/org/apache/solr/api/PayloadObj.java b/solr/core/src/java/org/apache/solr/api/PayloadObj.java
index 7941304..df4c1e4 100644
--- a/solr/core/src/java/org/apache/solr/api/PayloadObj.java
+++ b/solr/core/src/java/org/apache/solr/api/PayloadObj.java
@@ -31,8 +31,8 @@
     final SolrQueryRequest req;
     final SolrQueryResponse rsp;
 
-    public PayloadObj(String operationName, Object metaData, T obj, SolrQueryRequest req, SolrQueryResponse rsp) {
-        super(operationName, metaData);
+    public PayloadObj(String operationName, Object originalMetadata, T obj, SolrQueryRequest req, SolrQueryResponse rsp) {
+        super(operationName, originalMetadata);
         this.obj = obj;
         this.req = req;
         this.rsp = rsp;
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
index 909b3ed..3d95bbe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java
@@ -44,6 +44,9 @@
 import org.apache.solr.client.solrj.cloud.VersionedData;
 import org.apache.solr.cloud.rule.ReplicaAssigner;
 import org.apache.solr.cloud.rule.Rule;
+import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.impl.PlacementPluginAssignStrategy;
+import org.apache.solr.cluster.placement.impl.PlacementPluginConfigImpl;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
@@ -297,8 +300,7 @@
         .assignPullReplicas(pullReplicas)
         .onNodes(createNodeList)
         .build();
-    AssignStrategyFactory assignStrategyFactory = new AssignStrategyFactory(cloudManager);
-    AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, coll);
+    AssignStrategy assignStrategy = createAssignStrategy(cloudManager, clusterState, coll);
     return assignStrategy.assign(cloudManager, assignRequest);
   }
 
@@ -387,12 +389,12 @@
   }
 
   public static class AssignRequest {
-    public String collectionName;
-    public List<String> shardNames;
-    public List<String> nodes;
-    public int numNrtReplicas;
-    public int numTlogReplicas;
-    public int numPullReplicas;
+    public final String collectionName;
+    public final List<String> shardNames;
+    public final List<String> nodes;
+    public final int numNrtReplicas;
+    public final int numTlogReplicas;
+    public final int numPullReplicas;
 
     public AssignRequest(String collectionName, List<String> shardNames, List<String> nodes, int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
       this.collectionName = collectionName;
@@ -543,40 +545,30 @@
     }
   }
 
-  public static class AssignStrategyFactory {
-    public SolrCloudManager solrCloudManager;
+  /**
+   * Creates the appropriate instance of {@link AssignStrategy} based on how the cluster and/or individual collections are
+   * configured.
+   */
+  public static AssignStrategy createAssignStrategy(SolrCloudManager solrCloudManager, ClusterState clusterState, DocCollection collection) {
+    PlacementPlugin plugin = PlacementPluginConfigImpl.getPlacementPlugin(solrCloudManager);
 
-    public AssignStrategyFactory(SolrCloudManager solrCloudManager) {
-      this.solrCloudManager = solrCloudManager;
-    }
-
-    public AssignStrategy create(ClusterState clusterState, DocCollection collection) throws IOException, InterruptedException {
+    if (plugin != null) {
+      // If a cluster wide placement plugin is configured (and that's the only way to define a placement plugin), it overrides
+      // per collection configuration (i.e. rules are ignored)
+      return new PlacementPluginAssignStrategy(collection, plugin);
+    } else {
       @SuppressWarnings({"unchecked", "rawtypes"})
-      List<Map> ruleMaps = (List<Map>) collection.get("rule");
-      @SuppressWarnings({"rawtypes"})
-      List snitches = (List) collection.get(SNITCH);
+      List<Map> ruleMaps = (List<Map>) collection.get(DocCollection.RULE);
 
-      Strategy strategy = null;
       if (ruleMaps != null && !ruleMaps.isEmpty()) {
-        strategy = Strategy.RULES;
+        List<Rule> rules = new ArrayList<>();
+        for (Object map : ruleMaps) rules.add(new Rule((Map) map));
+        @SuppressWarnings({"rawtypes"})
+        List snitches = (List) collection.get(SNITCH);
+        return new RulesBasedAssignStrategy(rules, snitches, clusterState);
       } else {
-        strategy = Strategy.LEGACY;        
+        return new LegacyAssignStrategy();
       }
-      
-      switch (strategy) {
-        case LEGACY:
-          return new LegacyAssignStrategy();
-        case RULES:
-          List<Rule> rules = new ArrayList<>();
-          for (Object map : ruleMaps) rules.add(new Rule((Map) map));
-          return new RulesBasedAssignStrategy(rules, snitches, clusterState);
-        default:
-          throw new Assign.AssignmentException("Unknown strategy type: " + strategy);
-      }
-    }
-
-    private enum Strategy {
-      LEGACY, RULES;
     }
   }
 }
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
index cfad397..854ee9a 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java
@@ -378,8 +378,7 @@
           .assignPullReplicas(numPullReplicas)
           .onNodes(nodeList)
           .build();
-      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(cloudManager);
-      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, docCollection);
+      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(cloudManager, clusterState, docCollection);
       replicaPositions = assignStrategy.assign(cloudManager, assignRequest);
     }
     return replicaPositions;
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
index aa10bb1e..2267b4d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/ReplaceNodeCmd.java
@@ -120,8 +120,7 @@
               .assignPullReplicas(numPullReplicas)
               .onNodes(new ArrayList<>(ocmh.cloudManager.getClusterStateProvider().getLiveNodes()))
               .build();
-          Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-          Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, clusterState.getCollection(sourceCollection));
+          Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.cloudManager, clusterState, clusterState.getCollection(sourceCollection));
           targetNode = assignStrategy.assign(ocmh.cloudManager, assignRequest).get(0).node;
         }
         ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, targetNode);
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
index 0aa4389..db408b4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java
@@ -229,8 +229,7 @@
             .assignPullReplicas(numPullReplicas)
             .onNodes(nodeList)
             .build();
-    Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-    Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, restoreCollection);
+    Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.cloudManager, clusterState, restoreCollection);
     List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
 
     CountDownLatch countDownLatch = new CountDownLatch(restoreCollection.getSlices().size());
diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
index 495bf65..072c5d6 100644
--- a/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
+++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/SplitShardCmd.java
@@ -431,8 +431,7 @@
           .assignPullReplicas(numPull.get())
           .onNodes(new ArrayList<>(clusterState.getLiveNodes()))
           .build();
-      Assign.AssignStrategyFactory assignStrategyFactory = new Assign.AssignStrategyFactory(ocmh.cloudManager);
-      Assign.AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, collection);
+      Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(ocmh.cloudManager, clusterState, collection);
       List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
       t.stop();
 
diff --git a/solr/core/src/java/org/apache/solr/cluster/Cluster.java b/solr/core/src/java/org/apache/solr/cluster/Cluster.java
new file mode 100644
index 0000000..3b7bdd4
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/Cluster.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * <p>A representation of the SolrCloud cluster state, providing information on which nodes and collections are part of
+ * the cluster and a way to get to more detailed info.
+ */
+public interface Cluster {
+  /**
+   * @return current set of live nodes.
+   */
+  Set<Node> getLiveNodes();
+
+  /**
+   * Returns info about the given collection if one exists.
+   *
+   * @return {@code null} if no collection of the given name exists in the cluster.
+   */
+  SolrCollection getCollection(String collectionName) throws IOException;
+
+  /**
+   * @return an iterator over all {@link SolrCollection}s in the cluster.
+   */
+  Iterator<SolrCollection> iterator();
+
+  /**
+   * Allow foreach iteration on all collections of the cluster, such as: {@code for (SolrCollection c : cluster.collections()) {...}}.
+   */
+  Iterable<SolrCollection> collections();
+}
diff --git a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java b/solr/core/src/java/org/apache/solr/cluster/Node.java
similarity index 67%
copy from solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
copy to solr/core/src/java/org/apache/solr/cluster/Node.java
index c6287fd..301078e 100644
--- a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
+++ b/solr/core/src/java/org/apache/solr/cluster/Node.java
@@ -14,21 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.velocity;
 
-import org.apache.solr.core.SolrCore;
-import org.apache.velocity.tools.generic.LocaleConfig;
+package org.apache.solr.cluster;
 
-public class MockTool extends LocaleConfig {
-  private final SolrCore core;
-
-  public MockTool(SolrCore core) {
-    this.core = core;
-  }
-
-  public String star(String str) {
-    return "** " + str + " **";
-  }
-
-  public SolrCore getCore() { return core; }
+/**
+ * Representation of a SolrCloud node or server in the SolrCloud cluster.
+ */
+public interface Node {
+  String getName();
 }
diff --git a/solr/core/src/java/org/apache/solr/cluster/Replica.java b/solr/core/src/java/org/apache/solr/cluster/Replica.java
new file mode 100644
index 0000000..2c9230f
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/Replica.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster;
+
+/**
+ * An instantiation (or one of the copies) of a given {@link Shard} of a given {@link SolrCollection}.
+ */
+public interface Replica {
+  Shard getShard();
+
+  ReplicaType getType();
+  ReplicaState getState();
+
+  String getReplicaName();
+
+  /**
+   * The core name on disk
+   */
+  String getCoreName();
+
+  /**
+   * {@link Node} on which this {@link Replica} is located.
+   */
+  Node getNode();
+
+  /**
+   * The order of this enum is important from the most to least "important" replica type.
+   */
+  enum ReplicaType {
+    NRT, TLOG, PULL
+  }
+
+  enum ReplicaState {
+    ACTIVE, DOWN, RECOVERING, RECOVERY_FAILED
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/Shard.java b/solr/core/src/java/org/apache/solr/cluster/Shard.java
new file mode 100644
index 0000000..b1ffc14
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/Shard.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster;
+
+import java.util.Iterator;
+
+/**
+ * Shard in a {@link SolrCollection}, i.e. a subset of the data indexed in that collection.
+ */
+public interface Shard {
+  String getShardName();
+
+  /**
+   * @return the collection this shard is part of
+   */
+  SolrCollection getCollection();
+
+  /**
+   * Returns the {@link Replica} of the given name for that shard, if such a replica exists.
+   * @return {@code null} if the replica does not (or does not yet) exist for the shard.
+   */
+  Replica getReplica(String name);
+
+  /**
+   * @return an iterator over {@link Replica}s already existing for this {@link Shard}.
+   */
+  Iterator<Replica> iterator();
+
+  /**
+   * Allow foreach iteration on replicas such as: {@code for (Replica r : shard.replicas()) {...}}.
+   */
+  Iterable<Replica> replicas();
+
+  /**
+   * @return the current leader {@link Replica} for this {@link Shard}. Note that by the time this method returns the leader might
+   * have changed. Also, if there's no leader for any reason (don't shoot the messenger), this method will return {@code null}.
+   */
+  Replica getLeader();
+
+  ShardState getState();
+
+  enum ShardState {
+    ACTIVE, INACTIVE, CONSTRUCTION, RECOVERY, RECOVERY_FAILED
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/SolrCollection.java b/solr/core/src/java/org/apache/solr/cluster/SolrCollection.java
new file mode 100644
index 0000000..23e79a4
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/SolrCollection.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster;
+
+import org.apache.solr.cluster.placement.AttributeFetcher;
+import org.apache.solr.cluster.placement.AttributeValues;
+import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.PlacementRequest;
+
+import java.util.Iterator;
+
+/**
+ * Represents a Collection in SolrCloud (unrelated to {@link java.util.Collection} that uses the nicer name).
+ */
+public interface SolrCollection {
+  /**
+   * The collection name (value passed to {@link Cluster#getCollection(String)}).
+   */
+  String getName();
+
+  /**
+   * <p>Returns the {@link Shard} of the given name for that collection, if such a shard exists.</p>
+   *
+   * <p>Note that when a request for adding replicas for a collection is received by a {@link PlacementPlugin}, it is
+   * possible that replicas need to be added to non existing shards (see {@link PlacementRequest#getShardNames()}.
+   * Non existing shards <b>will not</b> be returned by this method. Only shards already existing will be returned.</p>
+   *
+   * @return {@code null} if the shard does not or does not yet exist for the collection.
+   */
+  Shard getShard(String name);
+
+  /**
+   * @return an iterator over {@link Shard}s of this {@link SolrCollection}.
+   */
+  Iterator<Shard> iterator();
+
+  /**
+   * Allow foreach iteration on shards such as: {@code for (Shard s : solrCollection.shards()) {...}}.
+   */
+  Iterable<Shard> shards();
+
+  /**
+   * <p>Returns the value of a custom property name set on the {@link SolrCollection} or {@code null} when no such
+   * property was set. Properties are set through the Collection API. See for example {@code COLLECTIONPROP} in the Solr reference guide.
+   *
+   * <p><b>{@link PlacementPlugin} related note:</b></p>
+   * <p>Using custom properties in conjunction with ad hoc {@link PlacementPlugin} code allows customizing placement
+   * decisions per collection.
+   *
+   * <p>For example if a collection is to be placed only on nodes using SSD storage and not rotating disks, it can be
+   * identified as such using some custom property (collection property could for example be called "driveType" and have
+   * value "ssd" in that case), and the placement plugin (implementing {@link PlacementPlugin}) would then
+   * {@link AttributeFetcher#requestNodeSystemProperty(String)} for that property from all nodes and only place replicas
+   * of this collection on {@link Node}'s for which
+   * {@link AttributeValues#getDiskType(Node)} is non empty and equal to {@link org.apache.solr.cluster.placement.AttributeFetcher.DiskHardwareType#SSD}.
+   */
+  String getCustomProperty(String customPropertyName);
+
+  /*
+   * There might be missing pieces here (and in other classes in this package) and these would have to be added when
+   * starting to use these interfaces to code real world placement and balancing code (plugins).
+   */
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/package-info.java b/solr/core/src/java/org/apache/solr/cluster/package-info.java
new file mode 100644
index 0000000..7571d5a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <p>This package contains the interfaces giving access to cluster state, including nodes, collections and the
+ * structure of the collections (shards and replicas). These interfaces allow separating external code contribution
+ * from the internal Solr implementations of these concepts to make usage simpler and to not require changes to
+ * external contributed code every time the internal abstractions are modified.</p>
+ *
+ * <p>The top level abstraction is {@link org.apache.solr.cluster.Cluster}. The cluster is composed of {@link org.apache.solr.cluster.Node}s.
+ * Indexes are stored in {@link org.apache.solr.cluster.SolrCollection}s, composed of {@link org.apache.solr.cluster.Shard}s
+ * whose actual copies on {@link org.apache.solr.cluster.Node}s are called {@link org.apache.solr.cluster.Replica}s.</p>
+ */
+package org.apache.solr.cluster;
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/AttributeFetcher.java b/solr/core/src/java/org/apache/solr/cluster/placement/AttributeFetcher.java
new file mode 100644
index 0000000..cb368d7
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/AttributeFetcher.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Node;
+
+import java.util.Set;
+
+/**
+ * <p>Instances of this interface are used to fetch various attributes from nodes (and other sources) in the cluster.</p>
+ */
+public interface AttributeFetcher {
+  /** Request the number of cores on each node. To get the value use {@link AttributeValues#getCoresCount(Node)} */
+  AttributeFetcher requestNodeCoreCount();
+
+  /** Request the disk hardware type on each node. To get the value use {@link AttributeValues#getDiskType(Node)} */
+  AttributeFetcher requestNodeDiskType();
+
+  /** Request the free disk size on each node. To get the value use {@link AttributeValues#getFreeDisk(Node)} */
+  AttributeFetcher requestNodeFreeDisk();
+
+  /** Request the total disk size on each node. To get the value use {@link AttributeValues#getTotalDisk(Node)} */
+  AttributeFetcher requestNodeTotalDisk();
+
+  /** Request the heap usage on each node. To get the value use {@link AttributeValues#getHeapUsage(Node)} */
+  AttributeFetcher requestNodeHeapUsage();
+
+  /** Request the system load average on each node. To get the value use {@link AttributeValues#getSystemLoadAverage(Node)} */
+  AttributeFetcher requestNodeSystemLoadAverage();
+
+  /** Request a given system property on each node. To get the value use {@link AttributeValues#getSystemProperty(Node, String)} */
+  AttributeFetcher requestNodeSystemProperty(String name);
+
+  /** Request an environment variable on each node. To get the value use {@link AttributeValues#getEnvironmentVariable(Node, String)} */
+  AttributeFetcher requestNodeEnvironmentVariable(String name);
+
+  /** Request a node metric from each node. To get the value use {@link AttributeValues#getMetric(Node, String, NodeMetricRegistry)} */
+  AttributeFetcher requestNodeMetric(String metricName, NodeMetricRegistry registry);
+
+
+  /**
+   * The set of nodes from which to fetch all node related attributes. Calling this method is mandatory if any of the {@code requestNode*}
+   * methods got called.
+   */
+  AttributeFetcher fetchFrom(Set<Node> nodes);
+
+  /** Requests a (non node) metric of a given scope and name. To get the value use {@link AttributeValues#getMetric(String, String)} */
+  AttributeFetcher requestMetric(String scope, String metricName);
+
+  /**
+   * Fetches all requested node attributes from all nodes passed to {@link #fetchFrom(Set)} as well as non node attributes
+   * (those requested for example using {@link #requestMetric(String, String)}).
+   * @return An instance allowing retrieval of all attributes that could be fetched.
+   */
+  AttributeValues fetchAttributes();
+
+  /**
+   * Registry options for {@link Node} metrics.
+   */
+  enum NodeMetricRegistry {
+    /** corresponds to solr.node */
+    SOLR_NODE,
+    /** corresponds to solr.jvm */
+    SOLR_JVM
+  }
+
+  enum DiskHardwareType {
+    SSD, ROTATIONAL
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/AttributeValues.java b/solr/core/src/java/org/apache/solr/cluster/placement/AttributeValues.java
new file mode 100644
index 0000000..4519c8a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/AttributeValues.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Node;
+
+import java.util.Optional;
+
+public interface AttributeValues {
+    /** For the given node: number of cores */
+    Optional<Integer> getCoresCount(Node node);
+
+    /** For the given node: Hardware type of the disk partition where cores are stored */
+    Optional<AttributeFetcher.DiskHardwareType> getDiskType(Node node);
+
+    /** For the given node: Free disk size in Gigabytes of the partition on which cores are stored */
+    Optional<Long> getFreeDisk(Node node);
+
+    /** For the given node: Total disk size in Gigabytes of the partition on which cores are stored */
+    Optional<Long> getTotalDisk(Node node);
+
+    /** For the given node: Percentage between 0 and 100 of used heap over max heap */
+    Optional<Double> getHeapUsage(Node node);
+
+    /** For the given node: matches {@link java.lang.management.OperatingSystemMXBean#getSystemLoadAverage()} */
+    Optional<Double> getSystemLoadAverage(Node node);
+
+    /** For the given node: system property value (system properties are passed to Java using {@code -Dname=value}) */
+    Optional<String> getSystemProperty(Node node, String name);
+
+    /** For the given node: environment variable value */
+    Optional<String> getEnvironmentVariable(Node node, String name);
+
+    /** For the given node: metric of specific name and registry */
+    Optional<Double> getMetric(Node node, String metricName, AttributeFetcher.NodeMetricRegistry registry);
+
+
+    /** Get a non node related metric of specific scope and name */
+    Optional<Double> getMetric(String scope, String metricName);
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementException.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementException.java
new file mode 100644
index 0000000..33af511
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementException.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+/**
+ * Exception thrown by a {@link PlacementPlugin} when it is unable to compute placement for whatever reason (except an
+ * {@link InterruptedException} that {@link PlacementPlugin#computePlacement}
+ * is also allowed to throw).
+ */
+public class PlacementException extends Exception {
+
+  public PlacementException() {
+    super();
+  }
+
+  public PlacementException(String message) {
+    super(message);
+  }
+
+  public PlacementException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  public PlacementException(Throwable cause) {
+    super(cause);
+  }
+
+  protected PlacementException(String message, Throwable cause,
+                               boolean enableSuppression,
+                               boolean writableStackTrace) {
+    super(message, cause, enableSuppression, writableStackTrace);
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlan.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlan.java
new file mode 100644
index 0000000..c4738a5
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlan.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Node;
+
+import java.util.Set;
+
+/**
+ * A fully specified plan or instructions for placement, deletion or move to be applied to the cluster.<p>
+ * Fully specified means the actual {@link Node}'s on which to place replicas have been decided.
+ *
+ * Instances are created by plugin code using {@link PlacementPlanFactory}. This interface obviously doesn't expose much but
+ * the underlying Solr side implementation has all that is needed (and will do at least one cast in order to execute the
+ * plan, likely then using some type of visitor pattern).
+ */
+public interface PlacementPlan {
+  /**
+   * @return the {@link PlacementRequest} at the origin of this {@link PlacementPlan}, as passed to the {@link PlacementPlanFactory} method
+   * that created this instance.
+   */
+  PlacementRequest getRequest();
+
+  /**
+   * @return the set of {@link ReplicaPlacement}'s computed by the plugin to implement the request
+   */
+  Set<ReplicaPlacement> getReplicaPlacements();
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlanFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlanFactory.java
new file mode 100644
index 0000000..c602ab0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlanFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+
+import java.util.Set;
+
+/**
+ * Allows plugins to create {@link PlacementPlan}s telling the Solr layer where to create replicas following the processing of
+ * a {@link PlacementRequest}. The Solr layer can (and will) check that the {@link PlacementPlan} conforms to the {@link PlacementRequest} (and
+ * if it does not, the requested operation will fail).
+ */
+public interface PlacementPlanFactory {
+  /**
+   * <p>Creates a {@link PlacementPlan} for adding replicas to a given shard(s) of an existing collection. Note this is also
+   * used for creating new collections since such a creation first creates the collection, then adds the replicas.
+   *
+   * <p>This is in support (directly or indirectly) of {@link org.apache.solr.cloud.api.collections.AddReplicaCmd},
+   * {@link org.apache.solr.cloud.api.collections.CreateShardCmd}, {@link org.apache.solr.cloud.api.collections.ReplaceNodeCmd},
+   * {@link org.apache.solr.cloud.api.collections.MoveReplicaCmd}, {@link org.apache.solr.cloud.api.collections.SplitShardCmd},
+   * {@link org.apache.solr.cloud.api.collections.RestoreCmd}, {@link org.apache.solr.cloud.api.collections.MigrateCmd}
+   * as well as of {@link org.apache.solr.cloud.api.collections.CreateCollectionCmd}.
+   */
+  PlacementPlan createPlacementPlan(PlacementRequest request, Set<ReplicaPlacement> replicaPlacements);
+
+  /**
+   * <p>Creates a {@link ReplicaPlacement} to be passed to {@link PlacementPlan} factory methods.
+   *
+   * <p>Note the plugin can also build its own instances implementing {@link ReplicaPlacement} instead of using this call
+   * (but using this method makes it easier).
+   */
+  ReplicaPlacement createReplicaPlacement(SolrCollection solrCollection, String shardName, Node node, Replica.ReplicaType replicaType);
+}
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java
new file mode 100644
index 0000000..28b6476
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPlugin.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Cluster;
+
+/**
+ * <p>Implemented by external plugins to control replica placement and movement on the search cluster (as well as other things
+ * such as cluster elasticity?) when cluster changes are required (initiated elsewhere, most likely following a Collection
+ * API call).
+ *
+ * <p>Instances of classes implementing this interface are created by {@link PlacementPluginFactory}
+ *
+ * <p>Implementations of this interface <b>must</b> be reentrant. {@link #computePlacement} <b>will</b> be called concurrently
+ * from many threads.
+ */
+public interface PlacementPlugin {
+  /**
+   * <p>Request from plugin code to compute placement. Note this method must be reentrant as a plugin instance may (read
+   * will) get multiple such calls in parallel.
+   *
+   * <p>Configuration is passed upon creation of a new instance of this class by {@link PlacementPluginFactory#createPluginInstance}.
+   *
+   * @param cluster initial state of the cluster. Note there are {@link java.util.Set}'s and {@link java.util.Map}'s
+   *                accessible from the {@link Cluster} and other reachable instances. These collection will not change
+   *                while the plugin is executing and will be thrown away once the plugin is done. The plugin code can
+   *                therefore modify them if needed.
+   * @param placementRequest request for placing new replicas or moving existing replicas on the cluster.
+   * @param attributeFetcher Factory used by the plugin to fetch additional attributes from the cluster nodes, such as
+   *                         count of cores, system properties, etc.
+   * @param placementPlanFactory Factory used to create instances of {@link PlacementPlan} to return computed decision.
+   * @return plan satisfying the placement request.
+   */
+  PlacementPlan computePlacement(Cluster cluster, PlacementRequest placementRequest, AttributeFetcher attributeFetcher,
+                                 PlacementPlanFactory placementPlanFactory) throws PlacementException, InterruptedException;
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginConfig.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginConfig.java
new file mode 100644
index 0000000..a39390f
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginConfig.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+/**
+ * <p>Configuration passed by Solr to {@link PlacementPluginFactory#createPluginInstance(PlacementPluginConfig)} so that plugin instances
+ * ({@link PlacementPlugin}) created by the factory can easily retrieve their configuration.</p>
+ *
+ * <p>A plugin writer decides the names and the types of the configurable parameters it needs. Available types are
+ * {@link String}, {@link Long}, {@link Boolean}, {@link Double}. This configuration currently lives in the {@code /clusterprops.json}
+ * file in Zookeeper (this could change in the future, the plugin code will not change but the way to store its configuration
+ * in the cluster might). {@code clusterprops.json} also contains the name of the plugin factory class implementing
+ * {@link PlacementPluginFactory}.</p>
+ *
+ * <p>In order to configure a plugin to be used for placement decisions, the following {@code curl} command (or something
+ * equivalent) has to be executed once the cluster is already running to set the configuration.
+ * Replace {@code localhost:8983} by one of your servers' IP address and port.</p>
+ *
+ * <pre>
+ *
+ * curl -X POST -H 'Content-type:application/json' -d '{
+ *   "set-placement-plugin": {
+ *     "class": "factory.class.name$inner",
+ *     "myfirstString": "a text value",
+ *     "aLong": 50,
+ *     "aDoubleConfig": 3.1415928,
+ *     "shouldIStay": true
+ *   }
+ * }' http://localhost:8983/api/cluster
+ * </pre>
+ *
+ * <p>The consequence will be the creation (or replacement if it exists) of an element in the Zookeeper file
+ * {@code /clusterprops.json} as follows:</p>
+ *
+ * <pre>
+ *
+ * "placement-plugin":{
+ *     "class":"factory.class.name$inner",
+ *     "myfirstString": "a text value",
+ *     "aLong": 50,
+ *     "aDoubleConfig": 3.1415928,
+ *     "shouldIStay": true}
+ * </pre>
+ *
+ * <p>In order to delete the placement-plugin section from {@code /clusterprops.json} (and to fallback to either Legacy
+ * or rule based placement if so configured for a collection), execute:</p>
+ *
+ * <pre>
+ *
+ * curl -X POST -H 'Content-type:application/json' -d '{
+ *   "set-placement-plugin" : null
+ * }' http://localhost:8983/api/cluster
+ * </pre>
+ */
+public interface PlacementPluginConfig {
+  /**
+   * @return the configured {@link String} value corresponding to {@code configName} if one exists (could be the empty
+   * string) and {@code null} otherwise.
+   */
+  String getStringConfig(String configName);
+
+  /**
+   * @return the configured {@link String} value corresponding to {@code configName} if one exists (could be the empty
+   * string) and {@code defaultValue} otherwise.
+   */
+  String getStringConfig(String configName, String defaultValue);
+
+  /**
+   * @return the configured {@link Boolean} value corresponding to {@code configName} if one exists, {@code null} otherwise.
+   */
+  Boolean getBooleanConfig(String configName);
+
+  /**
+   * @return the configured {@link Boolean} value corresponding to {@code configName} if one exists, a boxed {@code defaultValue}
+   * otherwise (this method never returns {@code null}).
+   */
+  Boolean getBooleanConfig(String configName, boolean defaultValue);
+
+  /**
+   * @return the configured {@link Long} value corresponding to {@code configName} if one exists, {@code null} otherwise.
+   */
+  Long getLongConfig(String configName);
+
+  /**
+   * @return the configured {@link Long} value corresponding to {@code configName} if one exists, a boxed {@code defaultValue}
+   * otherwise (this method never returns {@code null}).
+   */
+  Long getLongConfig(String configName, long defaultValue);
+
+  /**
+   * @return the configured {@link Double} value corresponding to {@code configName} if one exists, {@code null} otherwise.
+   */
+  Double getDoubleConfig(String configName);
+
+  /**
+   * @return the configured {@link Double} value corresponding to {@code configName} if one exists, a boxed {@code defaultValue}
+   * otherwise (this method never returns {@code null}).
+   */
+  Double getDoubleConfig(String configName, double defaultValue);
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginFactory.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginFactory.java
new file mode 100644
index 0000000..7372003
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementPluginFactory.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+/**
+ * Factory implemented by client code and configured in {@code solr.xml} allowing the creation of instances of
+ * {@link PlacementPlugin} to be used for replica placement computation.
+ */
+public interface PlacementPluginFactory {
+  /**
+   * Returns an instance of the plugin that will be repeatedly (and concurrently) called to compute placement. Multiple
+   * instances of a plugin can be used in parallel (for example if configuration has to change, but plugin instances with
+   * the previous configuration are still being used).
+   */
+  PlacementPlugin createPluginInstance(PlacementPluginConfig config);
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java
new file mode 100644
index 0000000..61b49dd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/PlacementRequest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.*;
+
+import java.util.Set;
+
+/**
+ * A cluster related placement request that Solr asks a {@link PlacementPlugin} plugin to resolve and compute a
+ * {@link PlacementPlan} placing one or more {@link Replica}'s of one or more {@link Shard}'s of an existing {@link SolrCollection}.
+ * The shard might or might not already exist, plugin code can easily find out by calling {@link SolrCollection#getShard(String)}
+ * with the shard name(s) returned by {@link #getShardNames()}.
+ *
+ * <p>The set of {@link Node}s on which the replicas should be placed
+ * is specified (defaults to being equal to the set returned by {@link Cluster#getLiveNodes()}).
+ */
+public interface PlacementRequest {
+    /**
+     * The {@link SolrCollection} to add {@link Replica}(s) to.
+     */
+    SolrCollection getCollection();
+
+    /**
+     * <p>Shard name(s) for which new replicas placement should be computed. The shard(s) might exist or not (that's why this
+     * method returns a {@link Set} of {@link String}'s and not directly a set of {@link Shard} instances).
+     *
+     * <p>Note the Collection API allows specifying the shard name or a {@code _route_} parameter. The Solr implementation will
+     * convert either specification into the relevant shard name so the plugin code doesn't have to worry about this.
+     */
+    Set<String> getShardNames();
+
+    /**
+     * <p>Replicas should only be placed on nodes in the set returned by this method.
+     *
+     * <p>When Collection API calls do not specify a specific set of target nodes, replicas can be placed on any live node of
+     * the cluster. In such cases, this set will be equal to the set of all live nodes. The plugin placement code does not
+     * need to worry (or care) if a set of nodes was explicitly specified or not.
+     *
+     * @return never {@code null} and never empty set (if that set was to be empty for any reason, no placement would be
+     * possible and the Solr infrastructure driving the plugin code would detect the error itself rather than calling the plugin).
+     */
+    Set<Node> getTargetNodes();
+
+    /**
+     * Returns the number of replica to create for the given replica type.
+     */
+    int getCountReplicasToCreate(Replica.ReplicaType replicaType);
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/ReplicaPlacement.java b/solr/core/src/java/org/apache/solr/cluster/placement/ReplicaPlacement.java
new file mode 100644
index 0000000..16bdc2c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/ReplicaPlacement.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement;
+
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.Shard;
+import org.apache.solr.cluster.SolrCollection;
+
+/**
+ * <p>Placement decision for a single {@link Replica}. Note this placement decision is used as part of a {@link PlacementPlan},
+ * it does not directly lead to the plugin code getting a corresponding {@link Replica} instance, nor does it require the
+ * plugin to provide a {@link Shard} instance (the plugin code gets such instances for existing replicas and shards in the
+ * cluster but does not create them directly for adding new replicas for new or existing shards).
+ *
+ * <p>Captures the {@link SolrCollection}, {@link Shard} (via the shard name), {@link Node} and {@link org.apache.solr.cluster.Replica.ReplicaType}
+ * of a Replica to be created.
+ */
+public interface ReplicaPlacement {
+
+  /**
+   * @return the {@link SolrCollection} for which the replica should be created
+   */
+  SolrCollection getCollection();
+
+  /**
+   * @return the name of the {@link Shard} for which the replica should be created. Note that only the name of the shard
+   * is returned and not a {@link Shard} instance because the shard might not yet exist when the placement request is made.
+   */
+  String getShardName();
+
+  /**
+   * @return the {@link Node} on which the replica should be created
+   */
+  Node getNode();
+
+  /**
+   * @return the type of the replica to be created
+   */
+  Replica.ReplicaType getReplicaType();
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeFetcherImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeFetcherImpl.java
new file mode 100644
index 0000000..98367d3
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeFetcherImpl.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import com.google.common.collect.Maps;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider;
+import org.apache.solr.cluster.placement.AttributeFetcher;
+import org.apache.solr.cluster.placement.AttributeValues;
+import org.apache.solr.cluster.Node;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.rule.ImplicitSnitch;
+import org.apache.solr.core.SolrInfoBean;
+import org.apache.solr.metrics.SolrMetricManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.invoke.MethodHandles;
+import java.util.*;
+import java.util.function.BiConsumer;
+
+/**
+ * <p>Implementation of {@link AttributeFetcher}: callers first declare which node attributes they want via the
+ * {@code requestNodeXxx()} methods and which nodes to query via {@link #fetchFrom}, then trigger a single batched
+ * retrieval with {@link #fetchAttributes()}.
+ *
+ * <p>Internally each requested attribute is translated into a "snitch" tag understood by
+ * {@link SolrCloudManager}'s node state provider, and the returned tag values are routed back into per-attribute maps.
+ */
+public class AttributeFetcherImpl implements AttributeFetcher {
+    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+    // One flag per simple node attribute; set by the corresponding requestNodeXxx() call below.
+    boolean requestedNodeCoreCount;
+    boolean requestedNodeDiskType;
+    boolean requestedNodeFreeDisk;
+    boolean requestedNodeTotalDisk;
+    boolean requestedNodeHeapUsage;
+    boolean requestedNodeSystemLoadAverage;
+    // Requested system properties and metrics, already converted into their snitch-tag form
+    // (see getSystemPropertySnitchTag / getMetricSnitchTag).
+    Set<String> requestedNodeSystemPropertiesSnitchTags = new HashSet<>();
+    Set<String> requestedNodeMetricSnitchTags  = new HashSet<>();
+
+    // Nodes to fetch attributes from; stays empty until fetchFrom() is called.
+    Set<Node> nodes = Collections.emptySet();
+
+    private final SolrCloudManager cloudManager;
+
+    AttributeFetcherImpl(SolrCloudManager cloudManager) {
+        this.cloudManager = cloudManager;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeCoreCount() {
+        requestedNodeCoreCount = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeDiskType() {
+        requestedNodeDiskType = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeFreeDisk() {
+        requestedNodeFreeDisk = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeTotalDisk() {
+        requestedNodeTotalDisk = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeHeapUsage() {
+        requestedNodeHeapUsage = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeSystemLoadAverage() {
+        requestedNodeSystemLoadAverage = true;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeSystemProperty(String name) {
+        requestedNodeSystemPropertiesSnitchTags.add(getSystemPropertySnitchTag(name));
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestNodeEnvironmentVariable(String name) {
+        throw new UnsupportedOperationException("Not yet implemented...");
+    }
+
+    @Override
+    public AttributeFetcher requestNodeMetric(String metricName, NodeMetricRegistry registry) {
+        requestedNodeMetricSnitchTags.add(getMetricSnitchTag(metricName, registry));
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher fetchFrom(Set<Node> nodes) {
+        this.nodes = nodes;
+        return this;
+    }
+
+    @Override
+    public AttributeFetcher requestMetric(String scope, String metricName) {
+        throw new UnsupportedOperationException("Not yet implemented...");
+    }
+
+    @Override
+    public AttributeValues fetchAttributes() {
+        // TODO Code here only supports node related attributes for now
+
+        // Maps in which attribute values will be added
+        Map<Node, Integer> nodeToCoreCount = Maps.newHashMap();
+        Map<Node, DiskHardwareType> nodeToDiskType = Maps.newHashMap();
+        Map<Node, Long> nodeToFreeDisk = Maps.newHashMap();
+        Map<Node, Long> nodeToTotalDisk = Maps.newHashMap();
+        Map<Node, Double> nodeToHeapUsage = Maps.newHashMap();
+        Map<Node, Double> nodeToSystemLoadAverage = Maps.newHashMap();
+        Map<String, Map<Node, String>> syspropSnitchToNodeToValue = Maps.newHashMap();
+        Map<String, Map<Node, Double>> metricSnitchToNodeToValue = Maps.newHashMap();
+
+        // In order to match the returned values for the various snitches, we need to keep track of where each
+        // received value goes. Given the target maps are of different types (the maps from Node to whatever defined
+        // above) we instead pass a function taking two arguments, the node and the (non null) returned value,
+        // that will cast the value into the appropriate type for the snitch tag and insert it into the appropriate map
+        // with the node as the key.
+        Map<String, BiConsumer<Node, Object>> allSnitchTagsToInsertion = Maps.newHashMap();
+        if (requestedNodeCoreCount) {
+            allSnitchTagsToInsertion.put(ImplicitSnitch.CORES, (node, value) -> nodeToCoreCount.put(node, ((Number) value).intValue()));
+        }
+        if (requestedNodeDiskType) {
+            allSnitchTagsToInsertion.put(ImplicitSnitch.DISKTYPE, (node, value) -> {
+                if ("rotational".equals(value)) {
+                    nodeToDiskType.put(node, DiskHardwareType.ROTATIONAL);
+                } else if ("ssd".equals(value)) {
+                    nodeToDiskType.put(node, DiskHardwareType.SSD);
+                }
+                // unknown disk type: insert no value, returned optional will be empty
+            });
+        }
+        if (requestedNodeFreeDisk) {
+            allSnitchTagsToInsertion.put(SolrClientNodeStateProvider.Variable.FREEDISK.tagName,
+                    // Convert from bytes to GB
+                    (node, value) -> nodeToFreeDisk.put(node, ((Number) value).longValue() / 1024 / 1024 / 1024));
+        }
+        if (requestedNodeTotalDisk) {
+            allSnitchTagsToInsertion.put(SolrClientNodeStateProvider.Variable.TOTALDISK.tagName,
+                    // Convert from bytes to GB
+                    (node, value) -> nodeToTotalDisk.put(node, ((Number) value).longValue() / 1024 / 1024 / 1024));
+        }
+        if (requestedNodeHeapUsage) {
+            allSnitchTagsToInsertion.put(ImplicitSnitch.HEAPUSAGE,
+                    (node, value) -> nodeToHeapUsage.put(node, ((Number) value).doubleValue()));
+        }
+        if (requestedNodeSystemLoadAverage) {
+            allSnitchTagsToInsertion.put(ImplicitSnitch.SYSLOADAVG,
+                    (node, value) -> nodeToSystemLoadAverage.put(node, ((Number) value).doubleValue()));
+        }
+        for (String sysPropSnitch : requestedNodeSystemPropertiesSnitchTags) {
+            final Map<Node, String> sysPropMap = Maps.newHashMap();
+            syspropSnitchToNodeToValue.put(sysPropSnitch, sysPropMap);
+            allSnitchTagsToInsertion.put(sysPropSnitch, (node, value) -> sysPropMap.put(node, (String) value));
+        }
+        for (String metricSnitch : requestedNodeMetricSnitchTags) {
+            final Map<Node, Double> metricMap = Maps.newHashMap();
+            metricSnitchToNodeToValue.put(metricSnitch, metricMap);
+            allSnitchTagsToInsertion.put(metricSnitch, (node, value) -> metricMap.put(node, (Double) value));
+        }
+
+        // Now that we know everything we need to fetch (and where to put it), just do it.
+        // NOTE(review): one getNodeValues() call per node — presumably acceptable for current cluster sizes; confirm
+        // if this ever needs batching across nodes.
+        for (Node node : nodes) {
+            Map<String, Object> tagValues = cloudManager.getNodeStateProvider().getNodeValues(node.getName(), allSnitchTagsToInsertion.keySet());
+            for (Map.Entry<String, Object> e : tagValues.entrySet()) {
+                String tag = e.getKey();
+                Object value = e.getValue(); // returned value from the node
+
+                BiConsumer<Node, Object> inserter = allSnitchTagsToInsertion.get(tag);
+                // If inserter is null it's a return of a tag that we didn't request
+                if (inserter != null) {
+                    inserter.accept(node, value);
+                } else {
+                    log.error("Received unsolicited snitch tag {} from node {}", tag, node);
+                }
+            }
+        }
+
+        // Hand all collected maps to the values abstraction returned to the plugin.
+        return new AttributeValuesImpl(nodeToCoreCount,
+                                       nodeToDiskType,
+                                       nodeToFreeDisk,
+                                       nodeToTotalDisk,
+                                       nodeToHeapUsage,
+                                       nodeToSystemLoadAverage,
+                                       syspropSnitchToNodeToValue,
+                                       metricSnitchToNodeToValue);
+    }
+
+    /** Maps the plugin-visible registry enum to the internal Solr metrics group; throws on unknown values. */
+    private static SolrInfoBean.Group getGroupFromMetricRegistry(NodeMetricRegistry registry) {
+        switch (registry) {
+            case SOLR_JVM:
+                return SolrInfoBean.Group.jvm;
+            case SOLR_NODE:
+                return SolrInfoBean.Group.node;
+            default:
+                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unsupported registry value " + registry);
+        }
+    }
+
+    /** Builds the snitch tag used to request metric {@code metricName} from the given registry. */
+    static String getMetricSnitchTag(String metricName, NodeMetricRegistry registry) {
+        return SolrClientNodeStateProvider.METRICS_PREFIX + SolrMetricManager.getRegistryName(getGroupFromMetricRegistry(registry), metricName);
+    }
+
+    /** Builds the snitch tag used to request system property {@code name} from a node. */
+    static String getSystemPropertySnitchTag(String name) {
+        return ImplicitSnitch.SYSPROP + name;
+    }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeValuesImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeValuesImpl.java
new file mode 100644
index 0000000..78c2143
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/AttributeValuesImpl.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import org.apache.solr.cluster.placement.AttributeFetcher;
+import org.apache.solr.cluster.placement.AttributeValues;
+import org.apache.solr.cluster.Node;
+
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * <p>Map-backed implementation of {@link AttributeValues}, populated by
+ * {@link AttributeFetcherImpl#fetchAttributes()}. An attribute that was not requested (or for which a node returned
+ * no value) is simply absent from the corresponding map, so lookups return {@link Optional#empty()}.
+ */
+public class AttributeValuesImpl implements AttributeValues {
+    final Map<Node, Integer> nodeToCoreCount;
+    final Map<Node, AttributeFetcher.DiskHardwareType> nodeToDiskType;
+    final Map<Node, Long> nodeToFreeDisk;
+    final Map<Node, Long> nodeToTotalDisk;
+    final Map<Node, Double> nodeToHeapUsage;
+    final Map<Node, Double> nodeToSystemLoadAverage;
+    // Outer key is the snitch tag (not the raw property/metric name); see AttributeFetcherImpl's tag helpers.
+    final Map<String, Map<Node, String>> syspropSnitchToNodeToValue;
+    final Map<String, Map<Node, Double>> metricSnitchToNodeToValue;
+
+    AttributeValuesImpl(Map<Node, Integer> nodeToCoreCount,
+                        Map<Node, AttributeFetcher.DiskHardwareType> nodeToDiskType,
+                        Map<Node, Long> nodeToFreeDisk,
+                        Map<Node, Long> nodeToTotalDisk,
+                        Map<Node, Double> nodeToHeapUsage,
+                        Map<Node, Double> nodeToSystemLoadAverage,
+                        Map<String, Map<Node, String>> syspropSnitchToNodeToValue,
+                        Map<String, Map<Node, Double>> metricSnitchToNodeToValue) {
+        this.nodeToCoreCount = nodeToCoreCount;
+        this.nodeToDiskType = nodeToDiskType;
+        this.nodeToFreeDisk = nodeToFreeDisk;
+        this.nodeToTotalDisk = nodeToTotalDisk;
+        this.nodeToHeapUsage = nodeToHeapUsage;
+        this.nodeToSystemLoadAverage = nodeToSystemLoadAverage;
+        this.syspropSnitchToNodeToValue = syspropSnitchToNodeToValue;
+        this.metricSnitchToNodeToValue = metricSnitchToNodeToValue;
+    }
+
+    @Override
+    public Optional<Integer> getCoresCount(Node node) {
+        return Optional.ofNullable(nodeToCoreCount.get(node));
+    }
+
+    @Override
+    public Optional<AttributeFetcher.DiskHardwareType> getDiskType(Node node) {
+        return Optional.ofNullable(nodeToDiskType.get(node));
+    }
+
+    @Override
+    public Optional<Long> getFreeDisk(Node node) {
+        return Optional.ofNullable(nodeToFreeDisk.get(node));
+    }
+
+    @Override
+    public Optional<Long> getTotalDisk(Node node) {
+        return Optional.ofNullable(nodeToTotalDisk.get(node));
+    }
+
+    @Override
+    public Optional<Double> getHeapUsage(Node node) {
+        return Optional.ofNullable(nodeToHeapUsage.get(node));
+    }
+
+    @Override
+    public Optional<Double> getSystemLoadAverage(Node node) {
+        return Optional.ofNullable(nodeToSystemLoadAverage.get(node));
+    }
+
+    @Override
+    public Optional<String> getSystemProperty(Node node, String name) {
+        // Translate the property name into the same snitch tag used at fetch time before looking it up.
+        Map<Node, String> nodeToValue = syspropSnitchToNodeToValue.get(AttributeFetcherImpl.getSystemPropertySnitchTag(name));
+        if (nodeToValue == null) {
+            return Optional.empty();
+        }
+        return Optional.ofNullable(nodeToValue.get(node));
+    }
+
+    @Override
+    public Optional<String> getEnvironmentVariable(Node node, String name) {
+        // TODO implement
+        return Optional.empty();
+    }
+
+    @Override
+    public Optional<Double> getMetric(Node node, String metricName, AttributeFetcher.NodeMetricRegistry registry) {
+        // Translate (metricName, registry) into the same snitch tag used at fetch time before looking it up.
+        Map<Node, Double> nodeToValue = metricSnitchToNodeToValue.get(AttributeFetcherImpl.getMetricSnitchTag(metricName, registry));
+        if (nodeToValue == null) {
+            return Optional.empty();
+        }
+        return Optional.ofNullable(nodeToValue.get(node));
+    }
+
+    @Override
+    public Optional<Double> getMetric(String scope, String metricName) {
+        // TODO implement
+        return Optional.empty();
+    }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanFactoryImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanFactoryImpl.java
new file mode 100644
index 0000000..3829372
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanFactoryImpl.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.*;
+
+import java.util.Set;
+
+/**
+ * Stateless factory handed to placement plugins so they can build {@link PlacementPlan} and {@link ReplicaPlacement}
+ * instances without depending on the concrete implementation classes.
+ */
+class PlacementPlanFactoryImpl implements PlacementPlanFactory {
+    @Override
+    public PlacementPlan createPlacementPlan(PlacementRequest request, Set<ReplicaPlacement> replicaPlacements) {
+        return new PlacementPlanImpl(request, replicaPlacements);
+    }
+
+    @Override
+    public ReplicaPlacement createReplicaPlacement(SolrCollection solrCollection, String shardName, Node node, Replica.ReplicaType replicaType) {
+        return new ReplicaPlacementImpl(solrCollection, shardName, node, replicaType);
+    }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanImpl.java
new file mode 100644
index 0000000..2dde07b
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPlanImpl.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.util.Set;
+
+import org.apache.solr.cluster.placement.PlacementPlan;
+import org.apache.solr.cluster.placement.PlacementRequest;
+import org.apache.solr.cluster.placement.ReplicaPlacement;
+
+/**
+ * Simple value holder implementing {@link PlacementPlan}: pairs the originating {@link PlacementRequest} with the
+ * set of {@link ReplicaPlacement}s computed for it. Created via {@link PlacementPlanFactoryImpl}.
+ */
+class PlacementPlanImpl implements PlacementPlan {
+
+  final PlacementRequest request;
+  final Set<ReplicaPlacement> replicaPlacements;
+
+  PlacementPlanImpl(PlacementRequest request, Set<ReplicaPlacement> replicaPlacements) {
+    this.request = request;
+    this.replicaPlacements = replicaPlacements;
+  }
+
+  @Override
+  public PlacementRequest getRequest() {
+    return request;
+  }
+
+  @Override
+  public Set<ReplicaPlacement> getReplicaPlacements() {
+    return replicaPlacements;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java
new file mode 100644
index 0000000..0bbf4e0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginAssignStrategy.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cluster.Cluster;
+import org.apache.solr.cluster.placement.PlacementException;
+import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.PlacementPlan;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.ReplicaPosition;
+
+/**
+ * This assign strategy delegates placement computation to "plugin" code: it adapts Solr's internal
+ * {@code Assign.AssignRequest} into the plugin-facing {@code PlacementRequest} abstraction, invokes the configured
+ * {@link PlacementPlugin}, and converts the resulting {@link PlacementPlan} back into {@link ReplicaPosition}s.
+ */
+public class PlacementPluginAssignStrategy implements Assign.AssignStrategy {
+
+  // PlacementPlanFactoryImpl holds no state, so a single shared instance is safe to reuse across requests.
+  private static final PlacementPlanFactoryImpl PLACEMENT_PLAN_FACTORY = new PlacementPlanFactoryImpl();
+
+  private final PlacementPlugin plugin;
+  private final DocCollection collection;
+
+  /**
+   * @param collection the collection for which this assign request is done. In theory would be better to pass it into the
+   *                   {@link #assign} call below (which would allow reusing instances of {@link PlacementPluginAssignStrategy}),
+   *                   but for now doing it here in order not to change the other Assign.AssignStrategy implementations.
+   */
+  public PlacementPluginAssignStrategy(DocCollection collection, PlacementPlugin plugin) {
+    this.collection = collection;
+    this.plugin = plugin;
+  }
+
+  public List<ReplicaPosition> assign(SolrCloudManager solrCloudManager, Assign.AssignRequest assignRequest)
+      throws Assign.AssignmentException, IOException, InterruptedException {
+
+    Cluster cluster = new SimpleClusterAbstractionsImpl.ClusterImpl(solrCloudManager);
+
+    PlacementRequestImpl placementRequest = PlacementRequestImpl.toPlacementRequest(cluster, collection, assignRequest);
+
+    final PlacementPlan placementPlan;
+    try {
+      placementPlan = plugin.computePlacement(cluster, placementRequest, new AttributeFetcherImpl(solrCloudManager), PLACEMENT_PLAN_FACTORY);
+    } catch (PlacementException pe) {
+      // Plugin-level failures are surfaced to the collection API as assignment failures.
+      throw new Assign.AssignmentException(pe);
+    }
+
+    return ReplicaPlacementImpl.toReplicaPositions(placementPlan.getReplicaPlacements());
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginConfigImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginConfigImpl.java
new file mode 100644
index 0000000..e6130a3
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementPluginConfigImpl.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.PlacementPluginConfig;
+import org.apache.solr.cluster.placement.PlacementPluginFactory;
+import org.apache.solr.cluster.placement.plugins.SamplePluginAffinityReplicaPlacement;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.util.Utils;
+
+/**
+ * <p>This concrete class is implementing the config as visible by the placement plugins and contains the code transforming the
+ * plugin configuration (currently stored in {@code clusterprops.json}) into a strongly typed abstraction (that will not
+ * change if internally plugin configuration is moved to some other place).</p>
+ *
+ * <p>This class also contains the (static) code dealing with instantiating the plugin factory config (it is config, even though
+ * of a slightly different type). This code is not accessed by the plugin code but used from the
+ * {@link org.apache.solr.cloud.api.collections.Assign} class.</p>
+ */
+public class PlacementPluginConfigImpl implements PlacementPluginConfig {
+  /**
+   * The key in {@code clusterprops.json} under which the plugin factory and the plugin configuration are defined.
+   */
+  final public static String PLACEMENT_PLUGIN_CONFIG_KEY = "placement-plugin";
+  /** Name of the property containing the factory class */
+  final public static String CONFIG_CLASS = "class";
+
+  // Separating configs into typed maps based on the element names in solr.xml
+  private final Map<String, String> stringConfigs;
+  private final Map<String, Long> longConfigs;
+  private final Map<String, Boolean> boolConfigs;
+  private final Map<String, Double> doubleConfigs;
+
+
+  private PlacementPluginConfigImpl(Map<String, String> stringConfigs,
+                                    Map<String, Long> longConfigs,
+                                    Map<String, Boolean> boolConfigs,
+                                    Map<String, Double> doubleConfigs) {
+    this.stringConfigs = stringConfigs;
+    this.longConfigs = longConfigs;
+    this.boolConfigs = boolConfigs;
+    this.doubleConfigs = doubleConfigs;
+  }
+
+  /** @return the configured value for {@code configName}, or {@code null} if absent or not a String. */
+  @Override
+  public String getStringConfig(String configName) {
+    return stringConfigs.get(configName);
+  }
+
+  @Override
+  public String getStringConfig(String configName, String defaultValue) {
+    String retval = stringConfigs.get(configName);
+    return retval != null ? retval : defaultValue;
+  }
+
+  /** @return the configured value for {@code configName}, or {@code null} if absent or not a Boolean. */
+  @Override
+  public Boolean getBooleanConfig(String configName) {
+    return boolConfigs.get(configName);
+  }
+
+  @Override
+  public Boolean getBooleanConfig(String configName, boolean defaultValue) {
+    Boolean retval = boolConfigs.get(configName);
+    return retval != null ? retval : defaultValue;
+  }
+
+  /** @return the configured value for {@code configName}, or {@code null} if absent or not a Long. */
+  @Override
+  public Long getLongConfig(String configName) {
+    return longConfigs.get(configName);
+  }
+
+  @Override
+  public Long getLongConfig(String configName, long defaultValue) {
+    Long  retval = longConfigs.get(configName);
+    return retval != null ? retval : defaultValue;
+  }
+
+  /** @return the configured value for {@code configName}, or {@code null} if absent or not a Double. */
+  @Override
+  public Double getDoubleConfig(String configName) {
+    return doubleConfigs.get(configName);
+  }
+
+  @Override
+  public Double getDoubleConfig(String configName, double defaultValue) {
+    Double retval = doubleConfigs.get(configName);
+    return retval != null ? retval : defaultValue;
+  }
+
+  /**
+   * <p>Parses the {@link Map} obtained as the value for key {@link #PLACEMENT_PLUGIN_CONFIG_KEY} from
+   * the {@code clusterprops.json} configuration {@link Map} (obtained by calling
+   * {@link org.apache.solr.client.solrj.impl.ClusterStateProvider#getClusterProperties()}) and translates it into a
+   * configuration consumable by the plugin (and that will not change as Solr changes internally how and where it stores
+   * configuration).</p>
+   *
+   * <p>Configuration properties {@code class} and {@code name} are reserved: for defining the plugin factory class and
+   * a human readable plugin name. All other properties are plugin specific.</p>
+   *
+   * <p>See configuration example and how-to in {@link SamplePluginAffinityReplicaPlacement}.</p>
+   */
+  static PlacementPluginConfig createConfigFromProperties(Map<String, Object> pluginConfig) {
+    final Map<String, String> stringConfigs = new HashMap<>();
+    final Map<String, Long> longConfigs = new HashMap<>();
+    final Map<String, Boolean> boolConfigs = new HashMap<>();
+    final Map<String, Double> doubleConfigs = new HashMap<>();
+
+    for (Map.Entry<String, Object> e : pluginConfig.entrySet()) {
+      String key = e.getKey();
+      // The factory class name is consumed separately in getPlacementPlugin(), not exposed as plugin config.
+      if (CONFIG_CLASS.equals(key)) {
+        continue;
+      }
+
+      if (key == null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing config name attribute in parameter of " + PLACEMENT_PLUGIN_CONFIG_KEY);
+      }
+
+      Object value = e.getValue();
+
+      if (value == null) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing config value for parameter " + key + " of " + PLACEMENT_PLUGIN_CONFIG_KEY);
+      }
+
+      // Dispatch each value into the typed map matching its JSON-parsed runtime type; anything else is rejected.
+      if (value instanceof String) {
+        stringConfigs.put(key, (String) value);
+      } else if (value instanceof Long) {
+        longConfigs.put(key, (Long) value);
+      } else if (value instanceof Boolean) {
+        boolConfigs.put(key, (Boolean) value);
+      } else if (value instanceof Double) {
+        doubleConfigs.put(key, (Double) value);
+      } else {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unsupported config type " + value.getClass().getName() +
+            " for parameter " + key + " of " + PLACEMENT_PLUGIN_CONFIG_KEY);
+      }
+    }
+
+    return new PlacementPluginConfigImpl(stringConfigs, longConfigs, boolConfigs, doubleConfigs);
+  }
+
+  /**
+   * <p>This is where the plugin configuration is being read (from wherever in Solr it lives, and this will likely change with time),
+   * a {@link org.apache.solr.cluster.placement.PlacementPluginFactory} (as configured) instantiated and a plugin instance
+   * created from this factory.</p>
+   *
+   * <p>The initial implementation you see here is crude! The configuration is read anew each time and the factory class
+   * as well as the plugin class instantiated each time.
+   * This has to be changed once the code is accepted overall, to register a listener that is notified when the configuration
+   * changes (see {@link org.apache.solr.common.cloud.ZkStateReader#registerClusterPropertiesListener})
+   * and that will either create a new instance of the plugin with new configuration using the existing factory (if the factory
+   * class has not changed - we need to keep track of this one) or create a new factory altogether (then a new plugin instance).</p>
+   *
+   * @return the configured plugin instance, or {@code null} when no placement-plugin config is present in cluster properties.
+   */
+  @SuppressWarnings({"unchecked"})
+  public static PlacementPlugin getPlacementPlugin(SolrCloudManager solrCloudManager) {
+    Map<String, Object> props = solrCloudManager.getClusterStateProvider().getClusterProperties();
+    Map<String, Object> pluginConfigMap = (Map<String, Object>) props.get(PLACEMENT_PLUGIN_CONFIG_KEY);
+
+    if (pluginConfigMap == null) {
+      return null;
+    }
+
+    String pluginFactoryClassName = (String) pluginConfigMap.get(CONFIG_CLASS);
+
+    // Get the configured plugin factory class. Is there a way to load a resource in Solr without being in the context of
+    // CoreContainer? Here the placement code is unrelated to the presence of cores (and one can imagine it running on
+    // specialized nodes not having a CoreContainer). I guess the loading code below is not totally satisfying (although
+    // it's not the only place in Solr doing it that way), but I didn't find more satisfying alternatives. Open to suggestions.
+    PlacementPluginFactory placementPluginFactory;
+    try {
+      Class<? extends PlacementPluginFactory> factoryClazz =
+              Class.forName(pluginFactoryClassName, true, PlacementPluginConfigImpl.class.getClassLoader())
+                      .asSubclass(PlacementPluginFactory.class);
+
+      placementPluginFactory = factoryClazz.getConstructor().newInstance(); // no args constructor - that's why we introduced a factory...
+    } catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,  "Unable to instantiate placement-plugin factory: " +
+              Utils.toJSONString(pluginConfigMap) + " please review /clusterprops.json config for " + PLACEMENT_PLUGIN_CONFIG_KEY, e);
+    }
+
+    // Translate the config from the properties where they are defined into the abstraction seen by the plugin
+    PlacementPluginConfig pluginConfig = createConfigFromProperties(pluginConfigMap);
+
+    return placementPluginFactory.createPluginInstance(pluginConfig);
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementRequestImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementRequestImpl.java
new file mode 100644
index 0000000..80cf6c5
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/PlacementRequestImpl.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.util.EnumMap;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.solr.cloud.api.collections.Assign;
+import org.apache.solr.cluster.Cluster;
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.*;
+import org.apache.solr.common.cloud.DocCollection;
+
+public class PlacementRequestImpl implements PlacementRequest {
+  private final SolrCollection solrCollection;
+  private final Set<String> shardNames;
+  private final Set<Node> targetNodes;
+  private final EnumMap<Replica.ReplicaType, Integer> countReplicas = new EnumMap<>(Replica.ReplicaType.class);
+
+  private PlacementRequestImpl(SolrCollection solrCollection,
+                               Set<String> shardNames, Set<Node> targetNodes,
+                               int countNrtReplicas, int countTlogReplicas, int countPullReplicas) {
+    this.solrCollection = solrCollection;
+    this.shardNames = shardNames;
+    this.targetNodes = targetNodes;
+    // Initializing map for all values of enum, so unboxing always possible later without checking for null
+    countReplicas.put(Replica.ReplicaType.NRT, countNrtReplicas);
+    countReplicas.put(Replica.ReplicaType.TLOG, countTlogReplicas);
+    countReplicas.put(Replica.ReplicaType.PULL, countPullReplicas);
+  }
+
+  @Override
+  public SolrCollection getCollection() {
+    return solrCollection;
+  }
+
+  @Override
+  public Set<String> getShardNames() {
+    return shardNames;
+  }
+
+  @Override
+  public Set<Node> getTargetNodes() {
+    return targetNodes;
+  }
+
+  @Override
+  public int getCountReplicasToCreate(Replica.ReplicaType replicaType) {
+    return countReplicas.get(replicaType);
+
+  }
+
+  /**
+   * Returns a {@link PlacementRequest} that can be consumed by a plugin based on an internal Assign.AssignRequest
+   * for adding replicas + additional info (upon creation of a new collection or adding replicas to an existing one).
+   */
+  static PlacementRequestImpl toPlacementRequest(Cluster cluster, DocCollection docCollection,
+                                                 Assign.AssignRequest assignRequest) throws Assign.AssignmentException {
+    SolrCollection solrCollection = new SimpleClusterAbstractionsImpl.SolrCollectionImpl(docCollection);
+    Set<String> shardNames = new HashSet<>(assignRequest.shardNames);
+    if (shardNames.size() < 1) {
+      throw new Assign.AssignmentException("Bad assign request: no shards specified for collection " + docCollection.getName());
+    }
+
+    final Set<Node> nodes;
+    // If no nodes specified, use all live nodes. If nodes are specified, use specified list.
+    if (assignRequest.nodes != null) {
+      nodes = SimpleClusterAbstractionsImpl.NodeImpl.getNodes(assignRequest.nodes);
+      if (nodes.isEmpty()) {
+        throw new Assign.AssignmentException("Bad assign request: empty list of nodes for collection " + docCollection.getName());
+      }
+    } else {
+      nodes = cluster.getLiveNodes();
+      if (nodes.isEmpty()) {
+        throw new Assign.AssignmentException("Impossible assign request: no live nodes for collection " + docCollection.getName());
+      }
+    }
+
+    return new PlacementRequestImpl(solrCollection, shardNames, nodes,
+        assignRequest.numNrtReplicas, assignRequest.numTlogReplicas, assignRequest.numPullReplicas);
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
new file mode 100644
index 0000000..0bf7564
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/ReplicaPlacementImpl.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.ReplicaPlacement;
+import org.apache.solr.common.cloud.ReplicaPosition;
+
+class ReplicaPlacementImpl implements ReplicaPlacement {
+  private final SolrCollection solrCollection;
+  private final String shardName;
+  private final Node node;
+  private final Replica.ReplicaType replicaType;
+
+  ReplicaPlacementImpl(SolrCollection solrCollection, String shardName, Node node, Replica.ReplicaType replicaType) {
+    this.solrCollection = solrCollection;
+    this.shardName = shardName;
+    this.node = node;
+    this.replicaType = replicaType;
+  }
+
+  @Override
+  public SolrCollection getCollection() {
+    return solrCollection;
+  }
+
+  @Override
+  public String getShardName() {
+    return shardName;
+  }
+
+  @Override
+  public Node getNode() {
+    return node;
+  }
+
+  @Override
+  public Replica.ReplicaType getReplicaType() {
+    return replicaType;
+  }
+
+  /**
+   * Translates a set of {@link ReplicaPlacement} returned by a plugin into a list of {@link ReplicaPosition} expected
+   * by {@link org.apache.solr.cloud.api.collections.Assign.AssignStrategy}
+   */
+  static List<ReplicaPosition> toReplicaPositions(Set<ReplicaPlacement> replicaPlacementSet) {
+    // The replica index in ReplicaPosition is not as strict a concept as it might seem. It is used in rules
+    // based placement (for sorting replicas) but its presence in ReplicaPosition is not justified (and when the code
+    // is executing here, it means rules based placement is not used).
+    // Looking at ReplicaAssigner.tryAllPermutations, it is well possible to create replicas with same index
+    // living on a given node for the same shard. This likely never happens because of the way replicas are
+    // placed on nodes (never two on the same node for same shard). Adopting the same shortcut/bad design here,
+    // but index should be removed at some point from ReplicaPosition.
+    List<ReplicaPosition> replicaPositions = new ArrayList<>(replicaPlacementSet.size());
+    int index = 0; // This is really an arbitrary value when adding replicas and a possible source of core name collisions
+    for (ReplicaPlacement placement : replicaPlacementSet) {
+      replicaPositions.add(new ReplicaPosition(placement.getShardName(), index++, SimpleClusterAbstractionsImpl.ReplicaImpl.toCloudReplicaType(placement.getReplicaType()), placement.getNode().getName()));
+    }
+
+    return replicaPositions;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java
new file mode 100644
index 0000000..6ea2d24
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/SimpleClusterAbstractionsImpl.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.impl;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.Maps;
+import org.apache.solr.client.solrj.cloud.SolrCloudManager;
+import org.apache.solr.cluster.*;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.util.Pair;
+
+import javax.annotation.Nonnull;
+
+/**
+ * <p>The implementations of the cluster abstractions from {@link org.apache.solr.cluster} as static inner classes of this
+ * one are a very straightforward approach
+ * for an initial implementation of the placement plugins, but are likely not the right implementations for the long term.</p>
+ *
+ * <p>Indeed there's a delay between the moment the Collection API computes a placement for a given command and when
+ * this placement decision is actually executed and Zookeeper for example updated with the new state (and that state visible
+ * to the node or nodes). Under high load when a large number of placement requests are computed, the naive implementation
+ * presented here could in some cases provide the same cluster state view to all placement requests over a period of time
+ * that can extend to over a minute and have the resulting placement decisions all place replicas on the same nodes,
+ * eventually leading to severe imbalance of the cluster.</p>
+ *
+ * <p>By modifying the cluster abstractions implementations (without changing the API seen by placement plugins) to provide
+ * a view of the cluster that anticipates the way the cluster will be after in flight placement decisions are taken
+ * into account, the underlying Solr side framework supporting placement plugins can compensate to a point for the delay
+ * between placement decision and that decision being observable.</p>
+ */
+class SimpleClusterAbstractionsImpl {
+
+  static class ClusterImpl implements Cluster {
+    private final Set<Node> liveNodes;
+    private final ClusterState clusterState;
+
+    ClusterImpl(SolrCloudManager solrCloudManager) throws IOException {
+      liveNodes = NodeImpl.getNodes(solrCloudManager.getClusterStateProvider().getLiveNodes());
+      clusterState = solrCloudManager.getClusterStateProvider().getClusterState();
+    }
+
+    @Override
+    public Set<Node> getLiveNodes() {
+      return liveNodes;
+    }
+
+    @Override
+    public SolrCollection getCollection(String collectionName) {
+      return SolrCollectionImpl.createCollectionFacade(clusterState, collectionName);
+    }
+
+    @Override
+    @Nonnull
+    public Iterator<SolrCollection> iterator() {
+      return clusterState.getCollectionsMap().values().stream().map(SolrCollectionImpl::fromDocCollection).collect(Collectors.toSet()).iterator();
+    }
+
+    @Override
+    public Iterable<SolrCollection> collections() {
+      return ClusterImpl.this::iterator;
+    }
+  }
+
+
+  static class NodeImpl implements Node {
+    public final String nodeName;
+
+    /**
+     * Transforms a collection of node names into a set of {@link Node} instances.
+     */
+    static Set<Node> getNodes(Collection<String> nodeNames) {
+      return nodeNames.stream().map(NodeImpl::new).collect(Collectors.toSet());
+    }
+
+    NodeImpl(String nodeName) {
+      this.nodeName = nodeName;
+    }
+
+    @Override
+    public String getName() {
+      return nodeName;
+    }
+
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "(" + getName() + ")";
+    }
+
+    /**
+     * This class ends up as a key in Maps in {@link org.apache.solr.cluster.placement.AttributeValues}.
+     * It is important to implement this method by comparing node names given that new instances of {@link Node} are created
+     * with names equal to existing instances (See {@link ReplicaImpl} constructor).
+     */
+    public boolean equals(Object obj) {
+      if (obj == null) { return false; }
+      if (obj == this) { return true; }
+      if (obj.getClass() != getClass()) { return false; }
+      NodeImpl other = (NodeImpl) obj;
+      return Objects.equals(this.nodeName, other.nodeName);
+    }
+
+    public int hashCode() {
+      return Objects.hashCode(nodeName);
+    }
+  }
+
+
+  static class SolrCollectionImpl implements SolrCollection {
+    private final String collectionName;
+    /** Map from {@link Shard#getShardName()} to {@link Shard} */
+    private final Map<String, Shard> shards;
+    private final DocCollection docCollection;
+
+    static SolrCollection createCollectionFacade(ClusterState clusterState, String collectionName) {
+      return fromDocCollection(clusterState.getCollectionOrNull(collectionName));
+    }
+
+    static SolrCollection fromDocCollection(DocCollection docCollection) {
+      return docCollection == null ? null : new SolrCollectionImpl(docCollection);
+    }
+
+    SolrCollectionImpl(DocCollection docCollection) {
+      this.collectionName = docCollection.getName();
+      this.shards = ShardImpl.getShards(this, docCollection.getSlices());
+      this.docCollection = docCollection;
+    }
+
+    @Override
+    public String getName() {
+      return collectionName;
+    }
+
+    @Override
+    public Shard getShard(String name) {
+      return shards.get(name);
+    }
+
+    @Override
+    @Nonnull
+    public Iterator<Shard> iterator() {
+      return shards.values().iterator();
+    }
+
+    @Override
+    public Iterable<Shard> shards() {
+      return SolrCollectionImpl.this::iterator;
+    }
+
+    @Override
+    public String getCustomProperty(String customPropertyName) {
+      return docCollection.getStr(customPropertyName);
+    }
+  }
+
+
+  static class ShardImpl implements Shard {
+    private final String shardName;
+    private final SolrCollection collection;
+    private final ShardState shardState;
+    private final Map<String, Replica> replicas;
+    private final Replica leader;
+
+    /**
+     * Transforms {@link Slice}'s of a {@link org.apache.solr.common.cloud.DocCollection} into a map of {@link Shard}'s,
+     * keyed by shard name ({@link Shard#getShardName()}).
+     */
+    static Map<String, Shard> getShards(SolrCollection solrCollection, Collection<Slice> slices) {
+      Map<String, Shard> shards = Maps.newHashMap();
+
+      for (Slice slice : slices) {
+        String shardName = slice.getName();
+        shards.put(shardName, new ShardImpl(shardName, solrCollection, slice));
+      }
+
+      return shards;
+    }
+
+    private ShardImpl(String shardName, SolrCollection collection, Slice slice) {
+      this.shardName = shardName;
+      this.collection = collection;
+      this.shardState = translateState(slice.getState());
+
+      Pair<Map<String, Replica>, Replica> pair = ReplicaImpl.getReplicas(slice.getReplicas(), this);
+      replicas = pair.first();
+      leader = pair.second();
+    }
+
+    private ShardState translateState(Slice.State state) {
+      switch (state) {
+        case ACTIVE: return ShardState.ACTIVE;
+        case INACTIVE: return ShardState.INACTIVE;
+        case CONSTRUCTION: return ShardState.CONSTRUCTION;
+        case RECOVERY: return ShardState.RECOVERY;
+        case RECOVERY_FAILED: return ShardState.RECOVERY_FAILED;
+        default: throw new RuntimeException("Unexpected " + state);
+      }
+    }
+
+    @Override
+    public String getShardName() {
+      return shardName;
+    }
+
+    @Override
+    public SolrCollection getCollection() {
+      return collection;
+    }
+
+    @Override
+    public Replica getReplica(String name) {
+      return replicas.get(name);
+    }
+
+    @Override
+    @Nonnull
+    public Iterator<Replica> iterator() {
+      return replicas.values().iterator();
+    }
+
+    @Override
+    public Iterable<Replica> replicas() {
+      return ShardImpl.this::iterator;
+    }
+
+    @Override
+    public Replica getLeader() {
+      return leader;
+    }
+
+    @Override
+    public ShardState getState() {
+      return shardState;
+    }
+
+    public boolean equals(Object obj) {
+      if (obj == null) { return false; }
+      if (obj == this) { return true; }
+      if (obj.getClass() != getClass()) { return false; }
+      ShardImpl other = (ShardImpl) obj;
+      return Objects.equals(this.shardName, other.shardName)
+              && Objects.equals(this.collection, other.collection)
+              && Objects.equals(this.shardState, other.shardState)
+              && Objects.equals(this.replicas, other.replicas)
+              && Objects.equals(this.leader, other.leader);
+    }
+
+    public int hashCode() {
+      return Objects.hash(shardName, collection, shardState);
+    }
+  }
+
+
+  static class ReplicaImpl implements Replica {
+    private final String replicaName;
+    private final String coreName;
+    private final Shard shard;
+    private final ReplicaType replicaType;
+    private final ReplicaState replicaState;
+    private final Node node;
+
+    /**
+     * Transforms {@link org.apache.solr.common.cloud.Replica}'s of a {@link Slice} into a map of {@link Replica}'s,
+     * keyed by replica name ({@link Replica#getReplicaName()}). Also returns in the
+     */
+    static Pair<Map<String, Replica>, Replica> getReplicas(Collection<org.apache.solr.common.cloud.Replica> sliceReplicas, Shard shard) {
+      Map<String, Replica> replicas = Maps.newHashMap();
+      Replica leader = null;
+
+      for (org.apache.solr.common.cloud.Replica sliceReplica : sliceReplicas) {
+        String replicaName = sliceReplica.getName();
+        Replica replica = new ReplicaImpl(replicaName, shard, sliceReplica);
+        replicas.put(replicaName, replica);
+
+        if (sliceReplica.isLeader()) {
+          leader = replica;
+        }
+      }
+
+      return new Pair<>(replicas, leader);
+    }
+
+    private ReplicaImpl(String replicaName, Shard shard, org.apache.solr.common.cloud.Replica sliceReplica) {
+      this.replicaName = replicaName;
+      this.coreName = sliceReplica.getCoreName();
+      this.shard = shard;
+      this.replicaType = translateType(sliceReplica.getType());
+      this.replicaState = translateState(sliceReplica.getState());
+      // Note this node might not be live, and if it is it is a different instance from the Nodes in Cluster, but that's ok.
+      this.node = new NodeImpl(sliceReplica.getNodeName());
+    }
+
+    private Replica.ReplicaType translateType(org.apache.solr.common.cloud.Replica.Type type) {
+      switch (type) {
+        case NRT: return Replica.ReplicaType.NRT;
+        case TLOG: return Replica.ReplicaType.TLOG;
+        case PULL: return Replica.ReplicaType.PULL;
+        default: throw new RuntimeException("Unexpected " + type);
+      }
+    }
+
+    private Replica.ReplicaState translateState(org.apache.solr.common.cloud.Replica.State state) {
+      switch (state) {
+        case ACTIVE: return Replica.ReplicaState.ACTIVE;
+        case DOWN: return Replica.ReplicaState.DOWN;
+        case RECOVERING: return Replica.ReplicaState.RECOVERING;
+        case RECOVERY_FAILED: return Replica.ReplicaState.RECOVERY_FAILED;
+        default: throw new RuntimeException("Unexpected " + state);
+      }
+    }
+
+    @Override
+    public Shard getShard() {
+      return shard;
+    }
+
+    @Override
+    public ReplicaType getType() {
+      return replicaType;
+    }
+
+    @Override
+    public ReplicaState getState() {
+      return replicaState;
+    }
+
+    @Override
+    public String getReplicaName() {
+      return replicaName;
+    }
+
+    @Override
+    public String getCoreName() {
+      return coreName;
+    }
+
+    @Override
+    public Node getNode() {
+      return node;
+    }
+
+    /**
+     * Translating a plugin visible ReplicaType to the internal Solr enum {@link org.apache.solr.common.cloud.Replica.Type}.
+     * The obvious approach would have been to add the internal Solr enum value as a parameter in the ReplicaType enum,
+     * but that would have leaked an internal SolrCloud implementation class to the plugin API.
+     */
+    static org.apache.solr.common.cloud.Replica.Type toCloudReplicaType(ReplicaType type) {
+      switch (type) {
+        case NRT: return org.apache.solr.common.cloud.Replica.Type.NRT;
+        case TLOG: return org.apache.solr.common.cloud.Replica.Type.TLOG;
+        case PULL: return org.apache.solr.common.cloud.Replica.Type.PULL;
+        default: throw new IllegalArgumentException("Unknown " + type);
+      }
+    }
+
+    public boolean equals(Object obj) {
+      if (obj == null) { return false; }
+      if (obj == this) { return true; }
+      if (obj.getClass() != getClass()) { return false; }
+      ReplicaImpl other = (ReplicaImpl) obj;
+      return Objects.equals(this.replicaName, other.replicaName)
+              && Objects.equals(this.coreName, other.coreName)
+              && Objects.equals(this.shard, other.shard)
+              && Objects.equals(this.replicaType, other.replicaType)
+              && Objects.equals(this.replicaState, other.replicaState)
+              && Objects.equals(this.node, other.node);
+    }
+
+    public int hashCode() {
+      return Objects.hash(replicaName, coreName, shard, replicaType, replicaState, node);
+    }
+  }
+}
diff --git a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java b/solr/core/src/java/org/apache/solr/cluster/placement/impl/package-info.java
similarity index 67%
copy from solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
copy to solr/core/src/java/org/apache/solr/cluster/placement/impl/package-info.java
index c6287fd..c80cda3 100644
--- a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/impl/package-info.java
@@ -14,21 +14,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.velocity;
 
-import org.apache.solr.core.SolrCore;
-import org.apache.velocity.tools.generic.LocaleConfig;
-
-public class MockTool extends LocaleConfig {
-  private final SolrCore core;
-
-  public MockTool(SolrCore core) {
-    this.core = core;
-  }
-
-  public String star(String str) {
-    return "** " + str + " **";
-  }
-
-  public SolrCore getCore() { return core; }
-}
+/**
+ * Implementation classes (not visible to plugins, subject to change at any time) for the interfaces in {@link org.apache.solr.cluster.placement}
+ * and to make them work with the rest of Solr.
+ */
+package org.apache.solr.cluster.placement.impl;
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/package-info.java b/solr/core/src/java/org/apache/solr/cluster/placement/package-info.java
new file mode 100644
index 0000000..76a1dae
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/package-info.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <p>This package contains interfaces visible to plugins (i.e. contributed code) implementing cluster elasticity,
+ * placement and scalability, as well as a few examples of how plugins can be implemented.
+ *
+ * <p>Initially, only placement related plugins are supported.
+ *
+ * <p>The entry point is the {@link org.apache.solr.cluster.placement.PlacementPluginFactory} building instances
+ * of the {@link org.apache.solr.cluster.placement.PlacementPlugin} interface where the placement computation is implemented.
+ *
+ * <p>From there, one will access the interfaces that allow navigating the cluster topology, see {@link org.apache.solr.cluster}.
+ *
+ * <p>Plugin code:
+ * <ul>
+ *   <li>Gets work to be done by receiving a {@link org.apache.solr.cluster.placement.PlacementRequest},</li>
+ *   <li>Can obtain more info using {@link org.apache.solr.cluster.placement.AttributeFetcher} and building an
+ *   {@link org.apache.solr.cluster.placement.AttributeValues}</li>
+ *   <li>Uses the values from {@link org.apache.solr.cluster.placement.AttributeValues} as well as cluster state and
+ *   {@link org.apache.solr.cluster.SolrCollection#getCustomProperty} and other data to compute placement,</li>
+ *   <li>Placement decisions are returned to Solr using an instance of {@link org.apache.solr.cluster.placement.PlacementPlan}
+ *   built using the {@link org.apache.solr.cluster.placement.PlacementPlanFactory}</li>
+ * </ul>
+ */
+package org.apache.solr.cluster.placement;
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginAffinityReplicaPlacement.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginAffinityReplicaPlacement.java
new file mode 100644
index 0000000..d738fb8
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginAffinityReplicaPlacement.java
@@ -0,0 +1,509 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.plugins;
+
+import com.google.common.collect.*;
+import org.apache.solr.cluster.*;
+import org.apache.solr.cluster.placement.*;
+import org.apache.solr.common.util.Pair;
+import org.apache.solr.common.util.SuppressForbidden;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.invoke.MethodHandles;
+import java.util.*;
+import java.util.stream.Collectors;
+
+/**
+ * <p>Implements placing replicas in a way that replicate past Autoscaling config defined
+ * <a href="https://github.com/lucidworks/fusion-cloud-native/blob/master/policy.json#L16">here</a>.</p>
+ *
+ * <p>This specification is doing the following:
+ * <p><i>Spread replicas per shard as evenly as possible across multiple availability zones (given by a sys prop),
+ * assign replicas based on replica type to specific kinds of nodes (another sys prop), and avoid having more than
+ * one replica per shard on the same node.<br>
+ * Only after these constraints are satisfied do minimize cores per node or disk usage.</i></p>
+ *
+ * <p>Overall strategy of this plugin:</p>
+ * <ul><li>
+ *     The set of nodes in the cluster is obtained and transformed into 3 independent sets (that can overlap) of nodes
+ *     accepting each of the three replica types.
+ * </li><li>
+ *     For each shard on which placing replicas is required and then for each replica type to place (starting with NRT, then TLOG then PULL): <ul>
+ *         <li>The set of candidate nodes corresponding to the replica type is used and from that set are removed nodes
+ *         that already have a replica (of any type) for that shard</li>
+ *         <li>If there are not enough nodes, an error is thrown (this is checked further down during processing).</li>
+ *         <li>The number of (already existing) replicas of the current type on each Availability Zone is collected.</li>
+ *         <li>Separate the set of available nodes into as many subsets (possibly some are empty) as there are Availability Zones
+ *         defined for the candidate nodes</li>
+ *         <li>In each AZ nodes subset, sort the nodes by increasing total number of cores count, with possibly a condition
+ *         that pushes nodes with low disk space to the end of the list? Or a weighted combination of the relative
+ *         importance of these two factors? Some randomization? Marking as non available nodes with not enough disk space?
+ *         These and other are likely aspects to be played with once the plugin is tested or observed to be running in prod,
+ *         don't expect the initial code drop(s) to do all of that.</li>
+ *         <li>Iterate over the number of replicas to place (for the current replica type for the current shard):
+ *         <ul>
+ *             <li>Based on the number of replicas per AZ collected previously, pick the non empty set of nodes having the
+ *             lowest number of replicas. Then pick the first node in that set. That's the node the replica is placed on.
+ *             Remove the node from the set of available nodes for the given AZ and increase the number of replicas placed
+ *             on that AZ.</li>
+ *         </ul></li>
+ *         <li>During this process, the number of cores on the nodes in general is tracked to take into account placement
+ *         decisions so that not all shards decide to put their replicas on the same nodes (they might though if these are
+ *         the less loaded nodes).</li>
+ *     </ul>
+ * </li>
+ * </ul>
+ *
+ * <p>This code is a realistic placement computation, based on a few assumptions. The code is written in such a way to
+ * make it relatively easy to adapt it to (somewhat) different assumptions. Configuration options could be introduced
+ * to allow configuration base option selection as well...</p>
+ *
+ * <p>In order to configure this plugin to be used for placement decisions, the following {@code curl} command (or something
+ * equivalent) has to be executed once the cluster is already running in order to set
+ * the appropriate Zookeeper stored configuration. Replace {@code localhost:8983} by one of your servers' IP address and port.</p>
+ *
+ * <pre>
+ *
+  curl -X POST -H 'Content-type:application/json' -d '{
+    "set-placement-plugin": {
+      "class": "org.apache.solr.cluster.placement.plugins.SamplePluginAffinityReplicaPlacement$Factory",
+      "minimalFreeDiskGB": 10,
+      "deprioritizedFreeDiskGB": 50
+    }
+  }' http://localhost:8983/api/cluster
+ * </pre>
+ *
+ * <p>The consequence will be the creation of an element in the Zookeeper file {@code /clusterprops.json} as follows:</p>
+ *
+ * <pre>
+ *
+ * "placement-plugin":{
+ *     "class":"org.apache.solr.cluster.placement.plugins.SamplePluginAffinityReplicaPlacement$Factory",
+ *     "minimalFreeDiskGB":10,
+ *     "deprioritizedFreeDiskGB":50}
+ * </pre>
+ *
+ * <p>In order to delete the placement-plugin section from {@code /clusterprops.json} (and to fallback to either Legacy
+ * or rule based placement if configured for a collection), execute:</p>
+ *
+ * <pre>
+ *
+  curl -X POST -H 'Content-type:application/json' -d '{
+    "set-placement-plugin" : null
+  }' http://localhost:8983/api/cluster
+ * </pre>
+ */
+public class SamplePluginAffinityReplicaPlacement implements PlacementPlugin {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * This factory is instantiated by config from its class name. Using it is the only way to create instances of
+   * {@link SamplePluginAffinityReplicaPlacement}.
+   */
+  static public class Factory implements PlacementPluginFactory {
+
+    /**
+     * Empty public constructor is used to instantiate this factory. Using a factory pattern to allow the factory to do one
+     * time costly operations if needed, and to only have to instantiate a default constructor class by name, rather than
+     * having to call a constructor with more parameters (if we were to instantiate the plugin class directly without going
+     * through a factory).
+     */
+    public Factory() {
+    }
+
+    @Override
+    public PlacementPlugin createPluginInstance(PlacementPluginConfig config) {
+      final long minimalFreeDiskGB = config.getLongConfig("minimalFreeDiskGB", 20L);
+      final long deprioritizedFreeDiskGB = config.getLongConfig("deprioritizedFreeDiskGB", 100L);
+      return new SamplePluginAffinityReplicaPlacement(minimalFreeDiskGB, deprioritizedFreeDiskGB);
+    }
+  }
+
+
+  /**
+   * <p>Name of the system property on a node indicating which (public cloud) Availability Zone that node is in. The value
+   * is any string, different strings denote different availability zones.
+   *
+   * <p>Nodes on which this system property is not defined are considered being in the same Availability Zone
+   * {@link #UNDEFINED_AVAILABILITY_ZONE} (hopefully the value of this constant is not the name of a real Availability Zone :).
+   */
+  public static final String AVAILABILITY_ZONE_SYSPROP = "availability_zone";
+  /** This is the "AZ" name for nodes that do not define an AZ. Should not match a real AZ name (I think we're safe) */
+  public static final String UNDEFINED_AVAILABILITY_ZONE = "uNd3f1NeD";
+
+  /**
+   * <p>Name of the system property on a node indicating the type of replicas allowed on that node.
+   * The value of that system property is a comma separated list or a single string of value names of
+   * {@link org.apache.solr.cluster.Replica.ReplicaType} (case insensitive). If that property is not defined, that node is
+   * considered accepting all replica types (i.e. undefined is equivalent to {@code "NRT,Pull,tlog"}).
+   *
+   * <p>See {@link #getNodesPerReplicaType}.
+   */
+  public static final String REPLICA_TYPE_SYSPROP = "replica_type";
+
+  /**
+   * If a node has strictly less GB of free disk than this value, the node is excluded from assignment decisions.
+   * Set to 0 or less to disable.
+   */
+  private final long minimalFreeDiskGB;
+
+  /**
+   * Replica allocation will assign replicas to nodes with at least this number of GB of free disk space regardless
+   * of the number of cores on these nodes rather than assigning replicas to nodes with less than this amount of free
+   * disk space if that's an option (if that's not an option, replicas can still be assigned to nodes with less than this
+   * amount of free space).
+   */
+  private final long deprioritizedFreeDiskGB;
+
+  /**
+   * The factory has decoded the configuration for the plugin instance and passes it the parameters it needs.
+   */
+  private SamplePluginAffinityReplicaPlacement(long minimalFreeDiskGB, long deprioritizedFreeDiskGB) {
+    this.minimalFreeDiskGB = minimalFreeDiskGB;
+    this.deprioritizedFreeDiskGB = deprioritizedFreeDiskGB;
+  }
+
+  @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+  public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
+                                        PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    Set<Node> nodes = request.getTargetNodes();
+    SolrCollection solrCollection = request.getCollection();
+
+    // Request all needed attributes
+    attributeFetcher.requestNodeSystemProperty(AVAILABILITY_ZONE_SYSPROP).requestNodeSystemProperty(REPLICA_TYPE_SYSPROP);
+    attributeFetcher.requestNodeCoreCount().requestNodeFreeDisk();
+    attributeFetcher.fetchFrom(nodes);
+    final AttributeValues attrValues = attributeFetcher.fetchAttributes();
+
+    // Split the set of nodes into 3 sets of nodes accepting each replica type (sets can overlap if nodes accept multiple replica types)
+    // These subsets are actually maps, because we capture the number of cores (of any replica type) present on each node.
+    // Also get the number of currently existing cores per node, so we can keep update as we place new cores to not end up
+    // always selecting the same node(s).
+    Pair<EnumMap<Replica.ReplicaType, Set<Node>>, Map<Node, Integer>> p = getNodesPerReplicaType(nodes, attrValues);
+
+    EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = p.first();
+    Map<Node, Integer> coresOnNodes = p.second();
+
+    // All available zones of live nodes. Due to some nodes not being candidates for placement, and some existing replicas
+    // being on availability zones that might be offline (i.e. their nodes are not live), this set might contain zones
+    // on which it is impossible to place replicas. That's ok.
+    ImmutableSet<String> availabilityZones = getZonesFromNodes(nodes, attrValues);
+
+    // Build the replica placement decisions here
+    Set<ReplicaPlacement> replicaPlacements = new HashSet<>();
+
+    // Let's now iterate on all shards to create replicas for and start finding home sweet homes for the replicas
+    for (String shardName : request.getShardNames()) {
+      // Iterate on the replica types in the enum order. We place more strategic replicas first
+      // (NRT is more strategic than TLOG more strategic than PULL). This is in case we eventually decide that less
+      // strategic replica placement impossibility is not a problem that should lead to replica placement computation
+      // failure. Current code does fail if placement is impossible (constraint is at most one replica of a shard on any node).
+      for (Replica.ReplicaType replicaType : Replica.ReplicaType.values()) {
+        makePlacementDecisions(solrCollection, shardName, availabilityZones, replicaType, request.getCountReplicasToCreate(replicaType),
+                attrValues, replicaTypeToNodes, coresOnNodes, placementPlanFactory, replicaPlacements);
+      }
+    }
+
+    return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+  }
+
+  private ImmutableSet<String> getZonesFromNodes(Set<Node> nodes, final AttributeValues attrValues) {
+    Set<String> azs = new HashSet<>();
+
+    for (Node n : nodes) {
+      azs.add(getNodeAZ(n, attrValues));
+    }
+
+    return ImmutableSet.copyOf(azs);
+  }
+
+  /**
+   * Resolves the AZ of a node and takes care of nodes that have no defined AZ in system property {@link #AVAILABILITY_ZONE_SYSPROP}
+   * to then return {@link #UNDEFINED_AVAILABILITY_ZONE} as the AZ name.
+   */
+  private String getNodeAZ(Node n, final AttributeValues attrValues) {
+    Optional<String> nodeAz = attrValues.getSystemProperty(n, AVAILABILITY_ZONE_SYSPROP);
+    // All nodes with undefined AZ will be considered part of the same AZ. This also works for deployments that do not care about AZ's
+    return nodeAz.orElse(UNDEFINED_AVAILABILITY_ZONE);
+  }
+
+  /**
+   * This class captures an availability zone and the nodes that are legitimate targets for replica placement in that
+   * Availability Zone. Instances are used as values in a {@link TreeMultimap} in which the total number of already
+   * existing replicas in the AZ is the key. This allows easily picking the set of nodes from which to select a node for
+   * placement in order to balance the number of replicas per AZ. Picking one of the nodes from the set is done using
+   * different criteria unrelated to the Availability Zone (picking the node is based on the {@link CoresAndDiskComparator}
+   * ordering).
+   */
+  private static class AzWithNodes {
+    final String azName;
+    List<Node> availableNodesForPlacement;
+    boolean hasBeenSorted;
+
+    AzWithNodes(String azName, List<Node> availableNodesForPlacement) {
+      this.azName = azName;
+      this.availableNodesForPlacement = availableNodesForPlacement;
+      // Once the list is sorted to an order we're happy with, this flag is set to true to avoid sorting multiple times
+      // unnecessarily.
+      this.hasBeenSorted = false;
+    }
+  }
+
+  /**
+   * Given the set of all nodes on which to do placement and fetched attributes, builds the sets representing
+   * candidate nodes for placement of replicas of each replica type.
+   * These sets are packaged and returned in an EnumMap keyed by replica type (1st member of the Pair).
+   * Also builds the number of existing cores on each node present in the returned EnumMap (2nd member of the returned Pair).
+   * Nodes for which the number of cores is not available for whatever reason are excluded from acceptable candidate nodes
+   * as it would not be possible to make any meaningful placement decisions.
+   * @param nodes all nodes on which this plugin should compute placement
+   * @param attrValues attributes fetched for the nodes. This method uses system property {@link #REPLICA_TYPE_SYSPROP} as
+   *                   well as the number of cores on each node.
+   */
+  private Pair<EnumMap<Replica.ReplicaType, Set<Node>>, Map<Node, Integer>> getNodesPerReplicaType(Set<Node> nodes, final AttributeValues attrValues) {
+    EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes = new EnumMap<>(Replica.ReplicaType.class);
+    Map<Node, Integer> coresOnNodes = Maps.newHashMap();
+
+    for (Replica.ReplicaType replicaType : Replica.ReplicaType.values()) {
+      replicaTypeToNodes.put(replicaType, new HashSet<>());
+    }
+
+    for (Node node : nodes) {
+      // Exclude nodes with unknown or too small disk free space
+      if (attrValues.getFreeDisk(node).isEmpty()) {
+        if (log.isWarnEnabled()) {
+          log.warn("Unknown free disk on node {}, excluding it from placement decisions.", node.getName());
+        }
+        // We rely later on the fact that the free disk optional is present (see CoresAndDiskComparator), be careful if you change anything here.
+        continue;
+      } if (attrValues.getFreeDisk(node).get() < minimalFreeDiskGB) {
+        if (log.isWarnEnabled()) {
+          log.warn("Node {} free disk ({}GB) lower than configured minimum {}GB, excluding it from placement decisions.", node.getName(), attrValues.getFreeDisk(node).get(), minimalFreeDiskGB);
+        }
+        continue;
+      }
+
+      if (attrValues.getCoresCount(node).isEmpty()) {
+        if (log.isWarnEnabled()) {
+          log.warn("Unknown number of cores on node {}, excluding it from placement decisions.", node.getName());
+        }
+        // We rely later on the fact that the number of cores optional is present (see CoresAndDiskComparator), be careful if you change anything here.
+        continue;
+      }
+
+      Integer coresCount = attrValues.getCoresCount(node).get();
+      coresOnNodes.put(node, coresCount);
+
+      String supportedReplicaTypes = attrValues.getSystemProperty(node, REPLICA_TYPE_SYSPROP).isPresent() ? attrValues.getSystemProperty(node, REPLICA_TYPE_SYSPROP).get() : null;
+      // If property not defined or is only whitespace on a node, assuming node can take any replica type
+      if (supportedReplicaTypes == null || supportedReplicaTypes.isBlank()) {
+        for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
+          replicaTypeToNodes.get(rt).add(node);
+        }
+      } else {
+        Set<String> acceptedTypes = Arrays.stream(supportedReplicaTypes.split(",")).map(String::trim).map(s -> s.toLowerCase(Locale.ROOT)).collect(Collectors.toSet());
+        for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
+          if (acceptedTypes.contains(rt.name().toLowerCase(Locale.ROOT))) {
+            replicaTypeToNodes.get(rt).add(node);
+          }
+        }
+      }
+    }
+    return new Pair<>(replicaTypeToNodes, coresOnNodes);
+  }
+
+  /**
+   * <p>Picks nodes from {@code targetNodes} for placing {@code numReplicas} replicas.
+   *
+   * <p>The criteria used in this method are, in this order:
+   * <ol>
+   *     <li>No more than one replica of a given shard on a given node (strictly enforced)</li>
+   *     <li>Balance as much as possible the number of replicas of the given {@link org.apache.solr.cluster.Replica.ReplicaType} over available AZ's.
+   *     This balancing takes into account existing replicas <b>of the corresponding replica type</b>, if any.</li>
+   *     <li>Place replicas if possible on nodes having more than a certain amount of free disk space (note that nodes with a too small
+   *     amount of free disk space were eliminated as placement targets earlier, in {@link #getNodesPerReplicaType}). There's
+   *     a threshold here rather than sorting on the amount of free disk space, because sorting on that value would in
+   *     practice lead to never considering the number of cores on a node.</li>
+   *     <li>Place replicas on nodes having a smaller number of cores (the number of cores considered
+   *     for this decision includes decisions made during the processing of the placement request)</li>
+   * </ol>
+   */
+  @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+  private void makePlacementDecisions(SolrCollection solrCollection, String shardName, ImmutableSet<String> availabilityZones,
+                                      Replica.ReplicaType replicaType, int numReplicas, final AttributeValues attrValues,
+                                      EnumMap<Replica.ReplicaType, Set<Node>> replicaTypeToNodes, Map<Node, Integer> coresOnNodes,
+                                      PlacementPlanFactory placementPlanFactory, Set<ReplicaPlacement> replicaPlacements) throws PlacementException {
+    // Build the set of candidate nodes, i.e. nodes not having (yet) a replica of the given shard
+    Set<Node> candidateNodes = new HashSet<>(replicaTypeToNodes.get(replicaType));
+
+    // Count existing replicas per AZ. We count only instances the type of replica for which we need to do placement. This
+    // can be changed in the loop below if we want to count all replicas for the shard.
+    Map<String, Integer> azToNumReplicas = Maps.newHashMap();
+    // Add all "interesting" AZ's, i.e. AZ's for which there's a chance we can do placement.
+    for (String az : availabilityZones) {
+      azToNumReplicas.put(az, 0);
+    }
+
+    Shard shard = solrCollection.getShard(shardName);
+    if (shard != null) {
+      // shard is non null if we're adding replicas to an already existing collection.
+      // If we're creating the collection, the shards do not exist yet.
+      for (Replica replica : shard.replicas()) {
+        // Nodes already having any type of replica for the shard can't get another replica.
+        candidateNodes.remove(replica.getNode());
+        // The node's AZ has to be counted as having a replica if it has a replica of the same type as the one we need
+        // to place here (remove the "if" below to balance the number of replicas per AZ across all replica types rather
+        // than within each replica type, but then there's a risk that all NRT replicas for example end up on the same AZ).
+        // Note that if in the cluster nodes are configured to accept a single replica type and not multiple ones, the
+        // two options are equivalent (governed by system property REPLICA_TYPE_SYSPROP on each node)
+        if (replica.getType() == replicaType) {
+          final String az = getNodeAZ(replica.getNode(), attrValues);
+          if (azToNumReplicas.containsKey(az)) {
+            // We do not count replicas on AZ's for which we don't have any node to place on because it would not help
+            // the placement decision. If we did want to do that, note the dereferencing below can't be assumed as the
+            // entry will not exist in the map.
+            azToNumReplicas.put(az, azToNumReplicas.get(az) + 1);
+          }
+        }
+      }
+    }
+
+    // We now have the set of real candidate nodes, we've enforced "No more than one replica of a given shard on a given node".
+    // We also counted for the shard and replica type under consideration how many replicas were per AZ, so we can place
+    // (or try to place) replicas on AZ's that have fewer replicas
+
+    // Get the candidate nodes per AZ in order to build (further down) a mapping of AZ to placement candidates.
+    Map<String, List<Node>> nodesPerAz = Maps.newHashMap();
+    for (Node node : candidateNodes) {
+      String nodeAz = getNodeAZ(node, attrValues);
+      List<Node> nodesForAz = nodesPerAz.computeIfAbsent(nodeAz, k -> new ArrayList<>());
+      nodesForAz.add(node);
+    }
+
+    // Build a treeMap sorted by the number of replicas per AZ and including candidate nodes suitable for placement on the
+    // AZ, so we can easily select the next AZ to get a replica assignment and quickly (constant time) decide if placement
+    // on this AZ is possible or not.
+    TreeMultimap<Integer, AzWithNodes> azByExistingReplicas = TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
+    for (Map.Entry<String, List<Node>> e : nodesPerAz.entrySet()) {
+      azByExistingReplicas.put(azToNumReplicas.get(e.getKey()), new AzWithNodes(e.getKey(), e.getValue()));
+    }
+
+    CoresAndDiskComparator coresAndDiskComparator = new CoresAndDiskComparator(attrValues, coresOnNodes, deprioritizedFreeDiskGB);
+
+    // Now we have for each AZ on which we might have a chance of placing a replica, the list of candidate nodes for replicas
+    // (candidate: does not already have a replica of this shard and is in the corresponding AZ).
+    // We must now select those of the nodes on which we actually place the replicas, and will do that based on the total
+    // number of cores already present on these nodes as well as the free disk space.
+    // We sort once by the order related to number of cores and disk space each list of nodes on an AZ. We do not sort all
+    // of them ahead of time because we might be placing a small number of replicas and it might be wasted work.
+    for (int i = 0; i < numReplicas; i++) {
+      // Pick the AZ having the lowest number of replicas for this shard, and if that AZ has available nodes, pick the
+      // most appropriate one (based on number of cores and disk space constraints). In the process, remove entries (AZ's)
+      // that do not have nodes to place replicas on because these are useless to us.
+      Map.Entry<Integer, AzWithNodes> azWithNodesEntry = null;
+      for (Iterator<Map.Entry<Integer, AzWithNodes>> it = azByExistingReplicas.entries().iterator(); it.hasNext(); ) {
+        Map.Entry<Integer, AzWithNodes> entry = it.next();
+        if (!entry.getValue().availableNodesForPlacement.isEmpty()) {
+          azWithNodesEntry = entry;
+          // Remove this entry. Will add it back after a node has been removed from the list of available nodes and the number
+          // of replicas on the AZ has been increased by one (search for "azByExistingReplicas.put" below).
+          it.remove();
+          break;
+        } else {
+          it.remove();
+        }
+      }
+
+      if (azWithNodesEntry == null) {
+        // This can happen because not enough nodes for the placement request or already too many nodes with replicas of
+        // the shard that can't accept new replicas or not enough nodes with enough free disk space.
+        throw new PlacementException("Not enough nodes to place " + numReplicas + " replica(s) of type " + replicaType +
+                " for shard " + shardName + " of collection " + solrCollection.getName());
+      }
+
+      AzWithNodes azWithNodes = azWithNodesEntry.getValue();
+      List<Node> nodes = azWithNodes.availableNodesForPlacement;
+
+      if (!azWithNodes.hasBeenSorted) {
+        // Make sure we do not tend to use always the same nodes (within an AZ) if all conditions are identical (well, this
+        // likely is not the case since after having added a replica to a node its number of cores increases for the next
+        // placement decision, but let's be defensive here, given that multiple concurrent placement decisions might see
+        // the same initial cluster state, and we want placement to be reasonable even in that case without creating an
+        // unnecessary imbalance).
+        // For example, if all nodes have 0 cores and same amount of free disk space, ideally we want to pick a random node
+        // for placement, not always the same one due to some internal ordering.
+        Collections.shuffle(nodes, new Random());
+
+        // Sort by increasing number of cores but pushing nodes with low free disk space to the end of the list
+        nodes.sort(coresAndDiskComparator);
+
+        azWithNodes.hasBeenSorted = true;
+      }
+
+      Node assignTarget = nodes.remove(0);
+
+      // Insert back a corrected entry for the AZ: one more replica living there and one less node that can accept new replicas
+      // (the remaining candidate node list might be empty, in which case it will be cleaned up on the next iteration).
+      azByExistingReplicas.put(azWithNodesEntry.getKey() + 1, azWithNodes);
+
+      // Track that the node has one more core. These values are only used during the current run of the plugin.
+      coresOnNodes.merge(assignTarget, 1, Integer::sum);
+
+      // Register the replica assignment just decided
+      replicaPlacements.add(placementPlanFactory.createReplicaPlacement(solrCollection, shardName, assignTarget, replicaType));
+    }
+  }
+
+  /**
+   * Comparator implementing the placement strategy based on free space and number of cores: we want to place new replicas
+   * on nodes with the lowest number of cores, but only if they do have enough disk space (expressed as a threshold value).
+   */
+  static class CoresAndDiskComparator implements Comparator<Node> {
+    private final AttributeValues attrValues;
+    private final Map<Node, Integer> coresOnNodes;
+    private final long deprioritizedFreeDiskGB;
+
+
+    /**
+     * The data we sort on is not part of the {@link Node} instances but has to be retrieved from the attributes and configuration.
+     * The number of cores per node is passed in a map whereas the free disk is fetched from the attributes due to the
+     * fact that we update the number of cores per node as we do allocations, but we do not update the free disk. The
+     * attrValues corresponding to the number of cores per node are the initial values, but we want to compare the actual
+     * value taking into account placement decisions already made during the current execution of the placement plugin.
+     */
+    CoresAndDiskComparator(AttributeValues attrValues, Map<Node, Integer> coresOnNodes, long deprioritizedFreeDiskGB) {
+      this.attrValues = attrValues;
+      this.coresOnNodes = coresOnNodes;
+      this.deprioritizedFreeDiskGB = deprioritizedFreeDiskGB;
+    }
+
+    @Override
+    public int compare(Node a, Node b) {
+      // Note all nodes do have free disk defined. This has been verified earlier.
+      boolean aHasLowFreeSpace = attrValues.getFreeDisk(a).get() < deprioritizedFreeDiskGB;
+      boolean bHasLowFreeSpace = attrValues.getFreeDisk(b).get() < deprioritizedFreeDiskGB;
+      if (aHasLowFreeSpace != bHasLowFreeSpace) {
+        // A node with low free space should be considered > node with high free space since it needs to come later in sort order
+        return Boolean.compare(aHasLowFreeSpace, bHasLowFreeSpace);
+      }
+      // The ordering on the number of cores is the natural order.
+      return Integer.compare(coresOnNodes.get(a), coresOnNodes.get(b));
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginMinimizeCores.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginMinimizeCores.java
new file mode 100644
index 0000000..54520fc
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginMinimizeCores.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.plugins;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.Map;
+
+import com.google.common.collect.Ordering;
+import com.google.common.collect.TreeMultimap;
+import org.apache.solr.cluster.Cluster;
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.*;
+import org.apache.solr.common.util.SuppressForbidden;
+
+/**
+ * <p>Implements placing replicas to minimize number of cores per {@link Node}, while not placing two replicas of the same
+ * shard on the same node.</p>
+ *
+ * <p>Warning: not really tested. See {@link SamplePluginAffinityReplicaPlacement} for a more realistic example.</p>
+ */
+public class SamplePluginMinimizeCores implements PlacementPlugin {
+
+  private final PlacementPluginConfig config;
+
+  private SamplePluginMinimizeCores(PlacementPluginConfig config) {
+    this.config = config;
+  }
+
+  static public class Factory implements PlacementPluginFactory {
+
+    /**
+     * Empty public constructor is used to instantiate this factory based on configuration in solr.xml, element
+     * {@code <placementPluginFactory>} in element {@code <solrcloud>}.
+     */
+    public Factory() {
+    }
+
+    @Override
+    public PlacementPlugin createPluginInstance(PlacementPluginConfig config) {
+      return new SamplePluginMinimizeCores(config);
+    }
+  }
+
+  @SuppressForbidden(reason = "Ordering.arbitrary() has no equivalent in Comparator class. Rather reuse than copy.")
+  public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
+                                        PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    int totalReplicasPerShard = 0;
+    for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
+      totalReplicasPerShard += request.getCountReplicasToCreate(rt);
+    }
+
+    if (cluster.getLiveNodes().size() < totalReplicasPerShard) {
+      throw new PlacementException("Cluster size too small for number of replicas per shard");
+    }
+
+    // Get number of cores on each Node
+    TreeMultimap<Integer, Node> nodesByCores = TreeMultimap.create(Comparator.naturalOrder(), Ordering.arbitrary());
+
+    Set<Node> nodes = request.getTargetNodes();
+
+    attributeFetcher.requestNodeCoreCount();
+    attributeFetcher.fetchFrom(nodes);
+    AttributeValues attrValues = attributeFetcher.fetchAttributes();
+
+
+    // Get the number of cores on each node and sort the nodes by increasing number of cores
+    for (Node node : nodes) {
+      if (attrValues.getCoresCount(node).isEmpty()) {
+        throw new PlacementException("Can't get number of cores in " + node);
+      }
+      nodesByCores.put(attrValues.getCoresCount(node).get(), node);
+    }
+
+    Set<ReplicaPlacement> replicaPlacements = new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
+
+    // Now place all replicas of all shards on nodes, by placing on nodes with the smallest number of cores and taking
+    // into account replicas placed during this computation. Note that for each shard we must place replicas on different
+    // nodes, when moving to the next shard we use the nodes sorted by their updated number of cores (due to replica
+    // placements for previous shards).
+    for (String shardName : request.getShardNames()) {
+      // Assign replicas based on the sort order of the nodesByCores tree multimap to put replicas on nodes with less
+      // cores first. We only need totalReplicasPerShard nodes given that's the number of replicas to place.
+      // We assign based on the passed nodeEntriesToAssign list so the right nodes get replicas.
+      ArrayList<Map.Entry<Integer, Node>> nodeEntriesToAssign = new ArrayList<>(totalReplicasPerShard);
+      Iterator<Map.Entry<Integer, Node>> treeIterator = nodesByCores.entries().iterator();
+      for (int i = 0; i < totalReplicasPerShard; i++) {
+        nodeEntriesToAssign.add(treeIterator.next());
+      }
+
+      // Update the number of cores each node will have once the assignments below got executed so the next shard picks the
+      // lowest loaded nodes for its replicas.
+      for (Map.Entry<Integer, Node> e : nodeEntriesToAssign) {
+        int coreCount = e.getKey();
+        Node node = e.getValue();
+        nodesByCores.remove(coreCount, node);
+        nodesByCores.put(coreCount + 1, node);
+      }
+
+      for (Replica.ReplicaType replicaType : Replica.ReplicaType.values()) {
+        placeReplicas(request.getCollection(), nodeEntriesToAssign, placementPlanFactory, replicaPlacements, shardName, request, replicaType);
+      }
+    }
+
+    return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+  }
+
+  private void placeReplicas(SolrCollection solrCollection, ArrayList<Map.Entry<Integer, Node>> nodeEntriesToAssign,
+                             PlacementPlanFactory placementPlanFactory, Set<ReplicaPlacement> replicaPlacements,
+                             String shardName, PlacementRequest request, Replica.ReplicaType replicaType) {
+    for (int replica = 0; replica < request.getCountReplicasToCreate(replicaType); replica++) {
+      final Map.Entry<Integer, Node> entry = nodeEntriesToAssign.remove(0);
+      final Node node = entry.getValue();
+
+      replicaPlacements.add(placementPlanFactory.createReplicaPlacement(solrCollection, shardName, node, replicaType));
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginRandomPlacement.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginRandomPlacement.java
new file mode 100644
index 0000000..eecb57f
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/SamplePluginRandomPlacement.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cluster.placement.plugins;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.solr.cluster.Cluster;
+import org.apache.solr.cluster.Node;
+import org.apache.solr.cluster.Replica;
+import org.apache.solr.cluster.SolrCollection;
+import org.apache.solr.cluster.placement.*;
+
+/**
+ * Implements random placement for new collection creation while preventing two replicas of same shard from being placed on same node.
+ *
+ * <p>Warning: not really tested. See {@link SamplePluginAffinityReplicaPlacement} for a more realistic example.</p>
+ */
+public class SamplePluginRandomPlacement implements PlacementPlugin {
+
+  private final PlacementPluginConfig config;
+
+  private SamplePluginRandomPlacement(PlacementPluginConfig config) {
+    this.config = config;
+  }
+
+  static public class Factory implements PlacementPluginFactory {
+    @Override
+    public PlacementPlugin createPluginInstance(PlacementPluginConfig config) {
+      return new SamplePluginRandomPlacement(config);
+    }
+  }
+
+  public PlacementPlan computePlacement(Cluster cluster, PlacementRequest request, AttributeFetcher attributeFetcher,
+                                        PlacementPlanFactory placementPlanFactory) throws PlacementException {
+    int totalReplicasPerShard = 0;
+    for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
+      totalReplicasPerShard += request.getCountReplicasToCreate(rt);
+    }
+
+    if (cluster.getLiveNodes().size() < totalReplicasPerShard) {
+      throw new PlacementException("Cluster size too small for number of replicas per shard");
+    }
+
+    Set<ReplicaPlacement> replicaPlacements = new HashSet<>(totalReplicasPerShard * request.getShardNames().size());
+
+    // Now place randomly all replicas of all shards on available nodes
+    for (String shardName : request.getShardNames()) {
+      // Shuffle the nodes for each shard so that replicas for a shard are placed on distinct yet random nodes
+      ArrayList<Node> nodesToAssign = new ArrayList<>(cluster.getLiveNodes());
+      Collections.shuffle(nodesToAssign, new Random());
+
+      for (Replica.ReplicaType rt : Replica.ReplicaType.values()) {
+        placeForReplicaType(request.getCollection(), nodesToAssign, placementPlanFactory, replicaPlacements, shardName, request, rt);
+      }
+    }
+
+    return placementPlanFactory.createPlacementPlan(request, replicaPlacements);
+  }
+
+  private void placeForReplicaType(SolrCollection solrCollection, ArrayList<Node> nodesToAssign, PlacementPlanFactory placementPlanFactory,
+                                   Set<ReplicaPlacement> replicaPlacements,
+                                   String shardName, PlacementRequest request, Replica.ReplicaType replicaType) {
+    for (int replica = 0; replica < request.getCountReplicasToCreate(replicaType); replica++) {
+      Node node = nodesToAssign.remove(0);
+
+      replicaPlacements.add(placementPlanFactory.createReplicaPlacement(solrCollection, shardName, node, replicaType));
+    }
+  }
+}
diff --git a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/package-info.java
similarity index 67%
rename from solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
rename to solr/core/src/java/org/apache/solr/cluster/placement/plugins/package-info.java
index c6287fd..1595679 100644
--- a/solr/contrib/velocity/src/test/org/apache/solr/velocity/MockTool.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/package-info.java
@@ -14,21 +14,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.solr.velocity;
 
-import org.apache.solr.core.SolrCore;
-import org.apache.velocity.tools.generic.LocaleConfig;
-
-public class MockTool extends LocaleConfig {
-  private final SolrCore core;
-
-  public MockTool(SolrCore core) {
-    this.core = core;
-  }
-
-  public String star(String str) {
-    return "** " + str + " **";
-  }
-
-  public SolrCore getCore() { return core; }
-}
+/**
+ * Sample plugin implementations.
+ */
+package org.apache.solr.cluster.placement.plugins;
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 6480fa8..9e0a890 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -87,6 +87,7 @@
 import org.apache.solr.core.backup.repository.BackupRepository;
 import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
 import org.apache.solr.filestore.PackageStoreAPI;
+import org.apache.solr.handler.ClusterAPI;
 import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.handler.SnapShooter;
 import org.apache.solr.handler.admin.CollectionsHandler;
@@ -719,6 +720,9 @@
     createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
     createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
     collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
+    ClusterAPI clusterAPI = new ClusterAPI(collectionsHandler);
+    containerHandlers.getApiBag().registerObject(clusterAPI);
+    containerHandlers.getApiBag().registerObject(clusterAPI.commands);
     /*
      * HealthCheckHandler needs to be initialized before InfoHandler, since the later one will call CoreContainer.getHealthCheckHandler().
      * We don't register the handler here because it'll be registered inside InfoHandler
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
index 7a10f89..532f1dd 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
@@ -16,8 +16,12 @@
  */
 package org.apache.solr.core;
 
-import com.google.common.annotations.VisibleForTesting;
-import java.io.*;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.lang.invoke.MethodHandles;
 import java.lang.reflect.Constructor;
 import java.net.MalformedURLException;
@@ -30,14 +34,26 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.PathMatcher;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.util.*;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.ResourceLoader;
+import org.apache.lucene.analysis.util.ResourceLoaderAware;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.PostingsFormat;
@@ -83,27 +99,6 @@
   private CoreContainer coreContainer;
   private PackageListeningClassLoader schemaLoader ;
 
-  private PackageListeningClassLoader createSchemaLoader() {
-    CoreContainer cc = getCoreContainer();
-    if (cc == null) {
-      //corecontainer not available . can't load from packages
-      return null;
-    }
-    return new PackageListeningClassLoader(cc, this, pkg -> {
-      if (getSolrConfig() == null) return null;
-      return getSolrConfig().maxPackageVersion(pkg);
-    }, () -> {
-      if(getCoreContainer() == null || config == null || coreName == null || coreId==null) return;
-      try (SolrCore c = getCoreContainer().getCore(coreName, coreId)) {
-        if (c != null) {
-          c.fetchLatestSchema();
-        }
-      }
-    });
-  }
-
-
-
   private final List<SolrCoreAware> waitingForCore = Collections.synchronizedList(new ArrayList<SolrCoreAware>());
   private final List<SolrInfoBean> infoMBeans = Collections.synchronizedList(new ArrayList<SolrInfoBean>());
   private final List<ResourceLoaderAware> waitingForResources = Collections.synchronizedList(new ArrayList<ResourceLoaderAware>());
@@ -476,8 +471,9 @@
         }
       }
     }
-
     Class<? extends T> clazz = null;
+    clazz = getPackageClass(cname, expectedType);
+    if(clazz != null) return clazz;
     try {
       // first try legacy analysis patterns, now replaced by Lucene's Analysis package:
       final Matcher m = legacyAnalysisPattern.matcher(cname);
@@ -540,6 +536,24 @@
     }
   }
 
+  private  <T> Class<? extends T> getPackageClass(String cname, Class<T> expectedType) {
+    PluginInfo.ClassName cName = PluginInfo.parseClassName(cname);
+    if (cName.pkg == null) return null;
+    ResourceLoaderAware aware = CURRENT_AWARE.get();
+    if (aware != null) {
+      //this is invoked from a component
+      //let's check if it's a schema component
+      @SuppressWarnings("rawtypes")
+      Class type = assertAwareCompatibility(ResourceLoaderAware.class, aware);
+      if (schemaResourceLoaderComponents.contains(type)) {
+        //this is a schema component
+        //lets use schema classloader
+        return getSchemaLoader().findClass(cname, expectedType);
+      }
+    }
+    return null;
+  }
+
   static final String[] empty = new String[0];
 
   @Override
@@ -679,7 +693,13 @@
       }
 
       for (ResourceLoaderAware aware : arr) {
-        aware.inform(loader);
+        CURRENT_AWARE.set(aware);
+        try{
+          aware.inform(loader);
+        } finally {
+          CURRENT_AWARE.remove();
+        }
+
       }
     }
   }
@@ -749,6 +769,7 @@
         ResourceLoaderAware.class, new Class<?>[]{
             // DO NOT ADD THINGS TO THIS LIST -- ESPECIALLY THINGS THAT CAN BE CREATED DYNAMICALLY
             // VIA RUNTIME APIS -- UNTILL CAREFULLY CONSIDERING THE ISSUES MENTIONED IN SOLR-8311
+            // evaluate if this must go into schemaResourceLoaderComponents
             CharFilterFactory.class,
             TokenFilterFactory.class,
             TokenizerFactory.class,
@@ -758,11 +779,21 @@
     );
   }
 
+  /**If these components are trying to load classes, use schema classloader
+   *
+   */
+  @SuppressWarnings("rawtypes")
+  private static final ImmutableSet<Class> schemaResourceLoaderComponents = ImmutableSet.of(
+      CharFilterFactory.class,
+      TokenFilterFactory.class,
+      TokenizerFactory.class,
+      FieldType.class);
+
   /**
    * Utility function to throw an exception if the class is invalid
    */
   @SuppressWarnings({"rawtypes"})
-  public static void assertAwareCompatibility(Class aware, Object obj) {
+  public static Class assertAwareCompatibility(Class aware, Object obj) {
     Class[] valid = awareCompatibility.get(aware);
     if (valid == null) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
@@ -770,7 +801,7 @@
     }
     for (Class v : valid) {
       if (v.isInstance(obj)) {
-        return;
+        return v;
       }
     }
     StringBuilder builder = new StringBuilder();
@@ -800,6 +831,25 @@
     return Collections.unmodifiableList(infoMBeans);
   }
 
+  private PackageListeningClassLoader createSchemaLoader() {
+    CoreContainer cc = getCoreContainer();
+    if (cc == null) {
+      //corecontainer not available . can't load from packages
+      return null;
+    }
+    return new PackageListeningClassLoader(cc, this, pkg -> {
+      if (getSolrConfig() == null) return null;
+      return getSolrConfig().maxPackageVersion(pkg);
+    }, () -> {
+      if(getCoreContainer() == null || config == null || coreName == null || coreId==null) return;
+      try (SolrCore c = getCoreContainer().getCore(coreName, coreId)) {
+        if (c != null) {
+          c.fetchLatestSchema();
+        }
+      }
+    });
+  }
+
 
   public static void persistConfLocally(SolrResourceLoader loader, String resourceName, byte[] content) {
     // Persist locally
@@ -831,4 +881,7 @@
     }
   }
 
+  //This is to verify if this requires to use the schema classloader for classes loaded from packages
+  public static final ThreadLocal<ResourceLoaderAware> CURRENT_AWARE = new ThreadLocal<>();
+
 }
diff --git a/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
new file mode 100644
index 0000000..5ce564e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/ClusterAPI.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.handler;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.solr.api.Command;
+import org.apache.solr.api.EndPoint;
+import org.apache.solr.api.PayloadObj;
+import org.apache.solr.client.solrj.request.beans.ClusterPropInfo;
+import org.apache.solr.cluster.placement.impl.PlacementPluginConfigImpl;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.annotation.JsonProperty;
+import org.apache.solr.common.cloud.ClusterProperties;
+import org.apache.solr.common.params.DefaultSolrParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.ReflectMapWriter;
+import org.apache.solr.common.util.Utils;
+import org.apache.solr.core.CoreContainer;
+import org.apache.solr.handler.admin.CollectionsHandler;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.DELETE;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.GET;
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
+import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.REQUESTID;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERPROP;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.OVERSEERSTATUS;
+import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
+import static org.apache.solr.security.PermissionNameProvider.Name.COLL_EDIT_PERM;
+import static org.apache.solr.security.PermissionNameProvider.Name.COLL_READ_PERM;
+
+public class ClusterAPI {
+  private final CoreContainer coreContainer;
+  private final CollectionsHandler collectionsHandler;
+
+  public  final Commands commands = new Commands();
+
+  public ClusterAPI(CollectionsHandler ch) {
+    this.collectionsHandler = ch;
+    this.coreContainer = ch.getCoreContainer();
+  }
+
+
+  @EndPoint(method = GET,
+      path = "/cluster/overseer",
+      permission = COLL_READ_PERM)
+  public void getOverseerStatus(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+    coreContainer.getCollectionsHandler().handleRequestBody(wrapParams(req, "action", OVERSEERSTATUS.toString()), rsp);
+  }
+
+  @EndPoint(method = GET,
+      path = "/cluster",
+      permission = COLL_READ_PERM)
+  public void getCluster(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+    CollectionsHandler.CollectionOperation.LIST_OP.execute(req, rsp, coreContainer.getCollectionsHandler());
+  }
+
+  @EndPoint(method = DELETE,
+      path = "/cluster/command-status/{id}",
+      permission = COLL_EDIT_PERM)
+  public void deleteCommandStatus(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+    wrapParams(req, REQUESTID, req.getPathTemplateValues().get("id"));
+    CollectionsHandler.CollectionOperation.DELETESTATUS_OP.execute(req, rsp, coreContainer.getCollectionsHandler());
+  }
+
+  public static SolrQueryRequest wrapParams(SolrQueryRequest req, Object... def) {
+    Map<String, Object> m = Utils.makeMap(def);
+    return wrapParams(req, m);
+  }
+
+  @SuppressWarnings({"unchecked", "rawtypes"})
+  public static SolrQueryRequest wrapParams(SolrQueryRequest req, Map m) {
+    ModifiableSolrParams solrParams = new ModifiableSolrParams();
+    m.forEach((k, v) -> {
+      if(v == null) return;
+      solrParams.add(k.toString(), String.valueOf(v));
+    });
+    DefaultSolrParams dsp = new DefaultSolrParams(req.getParams(),solrParams);
+    req.setParams(dsp);
+    return req;
+  }
+
+  @EndPoint(method = GET,
+      path = "/cluster/command-status",
+      permission = COLL_READ_PERM)
+  public void getCommandStatus(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+    CollectionsHandler.CollectionOperation.REQUESTSTATUS_OP.execute(req, rsp, collectionsHandler);
+  }
+
+  @EndPoint(method = GET,
+      path = "/cluster/nodes",
+      permission = COLL_READ_PERM)
+  public void getNodes(SolrQueryRequest req, SolrQueryResponse rsp) {
+    rsp.add("nodes", coreContainer.getZkController().getClusterState().getLiveNodes());
+  }
+
+  @EndPoint(method = POST,
+      path = "/cluster",
+      permission = COLL_EDIT_PERM)
+  public class Commands {
+    @Command(name = "add-role")
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public void addRole(PayloadObj<RoleInfo> obj) throws Exception {
+      RoleInfo info = obj.get();
+      Map m = info.toMap(new HashMap<>());
+      m.put("action", ADDROLE.toString());
+      collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), m), obj.getResponse());
+    }
+
+    @Command(name = "remove-role")
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public void removeRole(PayloadObj<RoleInfo> obj) throws Exception {
+      RoleInfo info = obj.get();
+      Map m = info.toMap(new HashMap<>());
+      m.put("action", REMOVEROLE.toString());
+      collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(), m), obj.getResponse());
+    }
+
+    @Command(name = "set-obj-property")
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public void setObjProperty(PayloadObj<ClusterPropInfo> obj) {
+      //Not using the object directly here because the API differentiate between {name:null} and {}
+      Map m = obj.getDataMap();
+      ClusterProperties clusterProperties = new ClusterProperties(coreContainer.getZkController().getZkClient());
+      try {
+        clusterProperties.setClusterProperties(m);
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in API", e);
+      }
+    }
+
+    @Command(name = "set-property")
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public void setProperty(PayloadObj<Map<String,String>> obj) throws Exception {
+      Map m =  obj.get();
+      m.put("action", CLUSTERPROP.toString());
+      collectionsHandler.handleRequestBody(wrapParams(obj.getRequest(),m ), obj.getResponse());
+    }
+
+    @Command(name = "set-placement-plugin")
+    public void setPlacementPlugin(PayloadObj<Map<String, Object>> obj) {
+      Map<String, Object> placementPluginConfig = obj.getDataMap();
+      ClusterProperties clusterProperties = new ClusterProperties(coreContainer.getZkController().getZkClient());
+      // When the json contains { "set-placement-plugin" : null }, the map is empty, not null.
+      final boolean unset = placementPluginConfig.isEmpty();
+      // Very basic sanity check. Real validation will be done when the config is used...
+      if (!unset && !placementPluginConfig.containsKey(PlacementPluginConfigImpl.CONFIG_CLASS)) {
+        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Must contain " + PlacementPluginConfigImpl.CONFIG_CLASS + " attribute (or be null)");
+      }
+      try {
+        // Need to reset to null first otherwise the mappings in placementPluginConfig are added to existing ones
+        // in /clusterprops.json rather than replacing them. If removing the config, that's all we do.
+        clusterProperties.setClusterProperties(
+                Collections.singletonMap(PlacementPluginConfigImpl.PLACEMENT_PLUGIN_CONFIG_KEY, null));
+        if (!unset) {
+          clusterProperties.setClusterProperties(
+                  Collections.singletonMap(PlacementPluginConfigImpl.PLACEMENT_PLUGIN_CONFIG_KEY, placementPluginConfig));
+        }
+      } catch (Exception e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in API", e);
+      }
+    }
+  }
+
+  public static class RoleInfo implements ReflectMapWriter {
+    @JsonProperty(required = true)
+    public String node;
+    @JsonProperty(required = true)
+    public String role;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
index 0160473..4a1aadb 100644
--- a/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/SolrConfigHandler.java
@@ -60,6 +60,7 @@
 import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.ConfigOverlay;
@@ -76,7 +77,6 @@
 import org.apache.solr.schema.SchemaManager;
 import org.apache.solr.security.AuthorizationContext;
 import org.apache.solr.security.PermissionNameProvider;
-import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.util.RTimer;
 import org.apache.solr.util.SolrPluginUtils;
 import org.apache.solr.util.plugin.SolrCoreAware;
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
index b63f7bd..fe856df 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionHandlerApi.java
@@ -17,7 +17,6 @@
 
 package org.apache.solr.handler.admin;
 
-import java.lang.invoke.MethodHandles;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumMap;
@@ -28,18 +27,11 @@
 import org.apache.solr.client.solrj.request.CollectionApiMapping.CommandMeta;
 import org.apache.solr.client.solrj.request.CollectionApiMapping.Meta;
 import org.apache.solr.client.solrj.request.CollectionApiMapping.V2EndPoint;
-import org.apache.solr.common.Callable;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterProperties;
-import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.handler.admin.CollectionsHandler.CollectionOperation;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class CollectionHandlerApi extends BaseHandlerApiSupport {
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   final CollectionsHandler handler;
   static Collection<ApiCommand> apiCommands = createCollMapping();
@@ -64,55 +56,10 @@
         }
       }
     }
-    //The following APIs have only V2 implementations
-    addApi(result, Meta.GET_NODES, params -> params.rsp.add("nodes", ((CollectionHandlerApi) params.apiHandler).handler.coreContainer.getZkController().getClusterState().getLiveNodes()));
-    addApi(result, Meta.SET_CLUSTER_PROPERTY_OBJ, params -> {
-      List<CommandOperation> commands = params.req.getCommands(true);
-      if (commands == null || commands.isEmpty()) throw new RuntimeException("Empty commands");
-      ClusterProperties clusterProperties = new ClusterProperties(((CollectionHandlerApi) params.apiHandler).handler.coreContainer.getZkController().getZkClient());
-
-      try {
-        clusterProperties.setClusterProperties(commands.get(0).getDataMap());
-      } catch (Exception e) {
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in API", e);
-      }
-    });
-
-    for (Meta meta : Meta.values()) {
-      if (result.get(meta) == null) {
-        log.error("ERROR_INIT. No corresponding API implementation for : {}", meta.commandName);
-      }
-    }
 
     return result.values();
   }
 
-  private static void addApi(Map<Meta, ApiCommand> result, Meta metaInfo, Callable<ApiParams> fun) {
-    result.put(metaInfo, new ApiCommand() {
-      @Override
-      public CommandMeta meta() {
-        return metaInfo;
-      }
-
-      @Override
-      public void invoke(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) throws Exception {
-        fun.call(new ApiParams(req, rsp, apiHandler));
-      }
-    });
-  }
-
-  static class ApiParams {
-    final SolrQueryRequest req;
-    final SolrQueryResponse rsp;
-    final BaseHandlerApiSupport apiHandler;
-
-    ApiParams(SolrQueryRequest req, SolrQueryResponse rsp, BaseHandlerApiSupport apiHandler) {
-      this.req = req;
-      this.rsp = rsp;
-      this.apiHandler = apiHandler;
-    }
-  }
-
   public CollectionHandlerApi(CollectionsHandler handler) {
     this.handler = handler;
   }
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
index 4ef43cd..3c9e37c 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java
@@ -16,6 +16,24 @@
  */
 package org.apache.solr.handler.admin;
 
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.apache.commons.io.IOUtils;
@@ -83,24 +101,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
 import static org.apache.solr.client.solrj.response.RequestStatusState.COMPLETED;
 import static org.apache.solr.client.solrj.response.RequestStatusState.FAILED;
 import static org.apache.solr.client.solrj.response.RequestStatusState.NOT_FOUND;
@@ -223,17 +223,7 @@
   @Override
   public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
     // Make sure the cores is enabled
-    CoreContainer cores = getCoreContainer();
-    if (cores == null) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
-          "Core container instance missing");
-    }
-
-    // Make sure that the core is ZKAware
-    if (!cores.isZooKeeperAware()) {
-      throw new SolrException(ErrorCode.BAD_REQUEST,
-          "Solr instance is not running in SolrCloud mode.");
-    }
+    CoreContainer cores = checkErrors();
 
     // Pick the action
     SolrParams params = req.getParams();
@@ -256,6 +246,21 @@
     rsp.setHttpCaching(false);
   }
 
+  protected CoreContainer checkErrors() {
+    CoreContainer cores = getCoreContainer();
+    if (cores == null) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Core container instance missing");
+    }
+
+    // Make sure that the core is ZKAware
+    if (!cores.isZooKeeperAware()) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "Solr instance is not running in SolrCloud mode.");
+    }
+    return cores;
+  }
+
   @SuppressWarnings({"unchecked"})
   void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer cores, CollectionAction action, CollectionOperation operation) throws Exception {
     if (!coreContainer.isZooKeeperAware()) {
diff --git a/solr/core/src/java/org/apache/solr/parser/CharStream.java b/solr/core/src/java/org/apache/solr/parser/CharStream.java
index c83353b..2e295cf 100644
--- a/solr/core/src/java/org/apache/solr/parser/CharStream.java
+++ b/solr/core/src/java/org/apache/solr/parser/CharStream.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 7.0 */
 /* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.solr.parser;
 
@@ -111,5 +111,10 @@
    */
   void Done();
 
+
+  void setTabSize(int i);
+  int getTabSize();
+  boolean getTrackLineColumn();
+  void setTrackLineColumn(boolean trackLineColumn);
 }
-/* JavaCC - OriginalChecksum=48b70e7c01825c8f301c7362bf1028d8 (do not edit this line) */
+/* (filtered)*/
diff --git a/solr/core/src/java/org/apache/solr/parser/FastCharStream.java b/solr/core/src/java/org/apache/solr/parser/FastCharStream.java
index 9bb4a03..19ecade 100644
--- a/solr/core/src/java/org/apache/solr/parser/FastCharStream.java
+++ b/solr/core/src/java/org/apache/solr/parser/FastCharStream.java
@@ -145,4 +145,24 @@
   public final int getBeginLine() {
     return 1;
   }
+
+  @Override
+  public void setTabSize(int i) {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public int getTabSize() {
+    throw new RuntimeException("Tab size not implemented.");
+  }
+
+  @Override
+  public boolean getTrackLineColumn() {
+    return false;
+  }
+
+  @Override
+  public void setTrackLineColumn(boolean trackLineColumn) {
+    throw new RuntimeException("Line/Column tracking not implemented.");
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/parser/ParseException.java b/solr/core/src/java/org/apache/solr/parser/ParseException.java
index 7732a7d..e36fdba 100644
--- a/solr/core/src/java/org/apache/solr/parser/ParseException.java
+++ b/solr/core/src/java/org/apache/solr/parser/ParseException.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 5.0 */
-/* JavaCCOptions:KEEP_LINE_COL=null */
+/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */
+/* JavaCCOptions:KEEP_LINE_COLUMN=true */
 package org.apache.solr.parser;
 
 /**
@@ -21,6 +21,11 @@
   private static final long serialVersionUID = 1L;
 
   /**
+   * The end of line string for this machine.
+   */
+  protected static String EOL = System.getProperty("line.separator", "\n");
+
+  /**
    * This constructor is used by the method "generateParseException"
    * in the generated parser.  Calling this constructor generates
    * a new object of this type with the fields "currentToken",
@@ -88,7 +93,7 @@
   private static String initialise(Token currentToken,
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
-    String eol = System.getProperty("line.separator", "\n");
+
     StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
@@ -101,7 +106,7 @@
       if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
         expected.append("...");
       }
-      expected.append(eol).append("    ");
+      expected.append(EOL).append("    ");
     }
     String retval = "Encountered \"";
     Token tok = currentToken.next;
@@ -118,20 +123,23 @@
       tok = tok.next;
     }
     retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
-    retval += "." + eol;
-    if (expectedTokenSequences.length == 1) {
-      retval += "Was expecting:" + eol + "    ";
+    retval += "." + EOL;
+    
+    
+    if (expectedTokenSequences.length == 0) {
+        // Nothing to add here
     } else {
-      retval += "Was expecting one of:" + eol + "    ";
+        if (expectedTokenSequences.length == 1) {
+          retval += "Was expecting:" + EOL + "    ";
+        } else {
+          retval += "Was expecting one of:" + EOL + "    ";
+        }
+        retval += expected.toString();
     }
-    retval += expected.toString();
+    
     return retval;
   }
 
-  /**
-   * The end of line string for this machine.
-   */
-  protected String eol = System.getProperty("line.separator", "\n");
 
   /**
    * Used to convert raw characters to their escaped version
@@ -144,8 +152,6 @@
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
         {
-           case 0 :
-              continue;
            case '\b':
               retval.append("\\b");
               continue;
@@ -184,4 +190,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=25e1ae9ad9614c4ce31c4b83f8a7397b (do not edit this line) */
+/* (filtered)*/
diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.java b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
index f2b792e..4293879 100644
--- a/solr/core/src/java/org/apache/solr/parser/QueryParser.java
+++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
@@ -1,3 +1,4 @@
+/* QueryParser.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParser.java */
 package org.apache.solr.parser;
 
@@ -66,84 +67,88 @@
 
 // *   Query  ::= ( Clause )*
 // *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
-  final public int Conjunction() throws ParseException {
-  int ret = CONJ_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+  final public 
+int Conjunction() throws ParseException {int ret = CONJ_NONE;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case AND:
-    case OR:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case AND:
+    case OR:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case AND:{
         jj_consume_token(AND);
-            ret = CONJ_AND;
+ret = CONJ_AND;
         break;
-      case OR:
+        }
+      case OR:{
         jj_consume_token(OR);
-              ret = CONJ_OR;
+ret = CONJ_OR;
         break;
+        }
       default:
         jj_la1[0] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       break;
+      }
     default:
       jj_la1[1] = jj_gen;
       ;
     }
-    {if (true) return ret;}
+{if ("" != null) return ret;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public int Modifiers() throws ParseException {
-  int ret = MOD_NONE;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+  final public int Modifiers() throws ParseException {int ret = MOD_NONE;
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case NOT:
     case PLUS:
-    case MINUS:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case PLUS:
+    case MINUS:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case PLUS:{
         jj_consume_token(PLUS);
-              ret = MOD_REQ;
+ret = MOD_REQ;
         break;
-      case MINUS:
+        }
+      case MINUS:{
         jj_consume_token(MINUS);
-                 ret = MOD_NOT;
+ret = MOD_NOT;
         break;
-      case NOT:
+        }
+      case NOT:{
         jj_consume_token(NOT);
-               ret = MOD_NOT;
+ret = MOD_NOT;
         break;
+        }
       default:
         jj_la1[2] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       break;
+      }
     default:
       jj_la1[3] = jj_gen;
       ;
     }
-    {if (true) return ret;}
+{if ("" != null) return ret;}
     throw new Error("Missing return statement in function");
-  }
+}
 
 // This makes sure that there is no garbage after the query string
-  final public Query TopLevelQuery(String field) throws ParseException, SyntaxError {
-  Query q;
+  final public Query TopLevelQuery(String field) throws ParseException, SyntaxError {Query q;
     q = Query(field);
     jj_consume_token(0);
-    {if (true) return q;}
+{if ("" != null) return q;}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Query(String field) throws ParseException, SyntaxError {
-  List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+  final public Query Query(String field) throws ParseException, SyntaxError {List<BooleanClause> clauses = new ArrayList<BooleanClause>();
   Query q;
   int conj, mods;
     if (jj_2_1(2)) {
       MultiTerm(field, clauses);
     } else {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case NOT:
       case PLUS:
       case MINUS:
@@ -159,11 +164,12 @@
       case RANGEEX_START:
       case LPARAMS:
       case FILTER:
-      case NUMBER:
+      case NUMBER:{
         mods = Modifiers();
         q = Clause(field);
-        addClause(clauses, CONJ_NONE, mods, q);
+addClause(clauses, CONJ_NONE, mods, q);
         break;
+        }
       default:
         jj_la1[4] = jj_gen;
         jj_consume_token(-1);
@@ -172,7 +178,7 @@
     }
     label_1:
     while (true) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case AND:
       case OR:
       case NOT:
@@ -190,9 +196,10 @@
       case RANGEEX_START:
       case LPARAMS:
       case FILTER:
-      case NUMBER:
+      case NUMBER:{
         ;
         break;
+        }
       default:
         jj_la1[5] = jj_gen;
         break label_1;
@@ -200,7 +207,7 @@
       if (jj_2_2(2)) {
         MultiTerm(field, clauses);
       } else {
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
         case AND:
         case OR:
         case NOT:
@@ -218,12 +225,13 @@
         case RANGEEX_START:
         case LPARAMS:
         case FILTER:
-        case NUMBER:
+        case NUMBER:{
           conj = Conjunction();
           mods = Modifiers();
           q = Clause(field);
-        addClause(clauses, conj, mods, q);
+addClause(clauses, conj, mods, q);
           break;
+          }
         default:
           jj_la1[6] = jj_gen;
           jj_consume_token(-1);
@@ -231,33 +239,34 @@
         }
       }
     }
-    if (clauses.size() == 1 && clauses.get(0).getOccur() == BooleanClause.Occur.SHOULD) {
+if (clauses.size() == 1 && clauses.get(0).getOccur() == BooleanClause.Occur.SHOULD) {
       Query firstQuery = clauses.get(0).getQuery();
       if ( ! (firstQuery instanceof RawQuery) || ((RawQuery)firstQuery).getTermCount() == 1) {
-        {if (true) return rawToNormal(firstQuery);}
+        {if ("" != null) return rawToNormal(firstQuery);}
       }
     }
-    {if (true) return getBooleanQuery(clauses);}
+    {if ("" != null) return getBooleanQuery(clauses);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Clause(String field) throws ParseException, SyntaxError {
-  Query q;
+  final public Query Clause(String field) throws ParseException, SyntaxError {Query q;
   Token fieldToken=null, boost=null;
   Token localParams=null;
   int flags = 0;
     if (jj_2_3(2)) {
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case TERM:{
         fieldToken = jj_consume_token(TERM);
         jj_consume_token(COLON);
-                                  field = discardEscapeChar(fieldToken.image);
+field = discardEscapeChar(fieldToken.image);
         break;
-      case STAR:
+        }
+      case STAR:{
         jj_consume_token(STAR);
         jj_consume_token(COLON);
-                         field = "*";
+field = "*";
         break;
+        }
       default:
         jj_la1[7] = jj_gen;
         jj_consume_token(-1);
@@ -266,7 +275,7 @@
     } else {
       ;
     }
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case BAREOPER:
     case STAR:
     case QUOTED:
@@ -276,63 +285,69 @@
     case REGEXPTERM:
     case RANGEIN_START:
     case RANGEEX_START:
-    case NUMBER:
+    case NUMBER:{
       q = Term(field);
       break;
-    case LPAREN:
+      }
+    case LPAREN:{
       jj_consume_token(LPAREN);
       q = Query(field);
       jj_consume_token(RPAREN);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[8] = jj_gen;
         ;
       }
       break;
-    case FILTER:
+      }
+    case FILTER:{
       jj_consume_token(FILTER);
-                 flags=startFilter();
+flags=startFilter();
       q = Query(field);
       jj_consume_token(RPAREN);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[9] = jj_gen;
         ;
       }
-                                                                                             q=getFilter(q); restoreFlags(flags);
+q=getFilter(q); restoreFlags(flags);
       break;
-    case LPARAMS:
+      }
+    case LPARAMS:{
       localParams = jj_consume_token(LPARAMS);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[10] = jj_gen;
         ;
       }
-                                                           q=getLocalParams(field, localParams.image);
+q=getLocalParams(field, localParams.image);
       break;
+      }
     default:
       jj_la1[11] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
-    {if (true) return handleBoost(q, boost);}
+{if ("" != null) return handleBoost(q, boost);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public Query Term(String field) throws ParseException, SyntaxError {
-  Token term, boost=null, fuzzySlop=null, goop1, goop2;
+  final public Query Term(String field) throws ParseException, SyntaxError {Token term, boost=null, fuzzySlop=null, goop1, goop2;
   boolean prefix = false;
   boolean wildcard = false;
   boolean fuzzy = false;
@@ -340,157 +355,181 @@
   boolean startInc=false;
   boolean endInc=false;
   Query q;
-    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+    switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
     case BAREOPER:
     case STAR:
     case TERM:
     case PREFIXTERM:
     case WILDTERM:
     case REGEXPTERM:
-    case NUMBER:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case TERM:
+    case NUMBER:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case TERM:{
         term = jj_consume_token(TERM);
         break;
-      case STAR:
+        }
+      case STAR:{
         term = jj_consume_token(STAR);
-                      wildcard=true;
+wildcard=true;
         break;
-      case PREFIXTERM:
+        }
+      case PREFIXTERM:{
         term = jj_consume_token(PREFIXTERM);
-                            prefix=true;
+prefix=true;
         break;
-      case WILDTERM:
+        }
+      case WILDTERM:{
         term = jj_consume_token(WILDTERM);
-                          wildcard=true;
+wildcard=true;
         break;
-      case REGEXPTERM:
+        }
+      case REGEXPTERM:{
         term = jj_consume_token(REGEXPTERM);
-                            regexp=true;
+regexp=true;
         break;
-      case NUMBER:
+        }
+      case NUMBER:{
         term = jj_consume_token(NUMBER);
         break;
-      case BAREOPER:
+        }
+      case BAREOPER:{
         term = jj_consume_token(BAREOPER);
-                          term.image = term.image.substring(0,1);
+term.image = term.image.substring(0,1);
         break;
+        }
       default:
         jj_la1[12] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case CARAT:
-      case FUZZY_SLOP:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case CARAT:
+      case FUZZY_SLOP:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case CARAT:{
           jj_consume_token(CARAT);
           boost = jj_consume_token(NUMBER);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case FUZZY_SLOP:
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case FUZZY_SLOP:{
             fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                        fuzzy=true;
+fuzzy=true;
             break;
+            }
           default:
             jj_la1[13] = jj_gen;
             ;
           }
           break;
-        case FUZZY_SLOP:
+          }
+        case FUZZY_SLOP:{
           fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                 fuzzy=true;
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case CARAT:
+fuzzy=true;
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case CARAT:{
             jj_consume_token(CARAT);
             boost = jj_consume_token(NUMBER);
             break;
+            }
           default:
             jj_la1[14] = jj_gen;
             ;
           }
           break;
+          }
         default:
           jj_la1[15] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
         break;
+        }
       default:
         jj_la1[16] = jj_gen;
         ;
       }
-      q = handleBareTokenQuery(getField(field), term, fuzzySlop, prefix, wildcard, fuzzy, regexp);
+q = handleBareTokenQuery(getField(field), term, fuzzySlop, prefix, wildcard, fuzzy, regexp);
       break;
+      }
     case RANGEIN_START:
-    case RANGEEX_START:
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_START:
+    case RANGEEX_START:{
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_START:{
         jj_consume_token(RANGEIN_START);
-                        startInc = true;
+startInc = true;
         break;
-      case RANGEEX_START:
+        }
+      case RANGEEX_START:{
         jj_consume_token(RANGEEX_START);
         break;
+        }
       default:
         jj_la1[17] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
         goop1 = jj_consume_token(RANGE_GOOP);
         break;
-      case RANGE_QUOTED:
+        }
+      case RANGE_QUOTED:{
         goop1 = jj_consume_token(RANGE_QUOTED);
         break;
-      case RANGE_TO:
+        }
+      case RANGE_TO:{
         goop1 = jj_consume_token(RANGE_TO);
         break;
+        }
       default:
         jj_la1[18] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
       jj_consume_token(RANGE_TO);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGE_GOOP:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGE_GOOP:{
         goop2 = jj_consume_token(RANGE_GOOP);
         break;
-      case RANGE_QUOTED:
+        }
+      case RANGE_QUOTED:{
         goop2 = jj_consume_token(RANGE_QUOTED);
         break;
-      case RANGE_TO:
+        }
+      case RANGE_TO:{
         goop2 = jj_consume_token(RANGE_TO);
         break;
+        }
       default:
         jj_la1[19] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case RANGEIN_END:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case RANGEIN_END:{
         jj_consume_token(RANGEIN_END);
-                      endInc = true;
+endInc = true;
         break;
-      case RANGEEX_END:
+        }
+      case RANGEEX_END:{
         jj_consume_token(RANGEEX_END);
         break;
+        }
       default:
         jj_la1[20] = jj_gen;
         jj_consume_token(-1);
         throw new ParseException();
       }
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-      case CARAT:
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+      case CARAT:{
         jj_consume_token(CARAT);
         boost = jj_consume_token(NUMBER);
         break;
+        }
       default:
         jj_la1[21] = jj_gen;
         ;
       }
-      boolean startOpen=false;
+boolean startOpen=false;
       boolean endOpen=false;
       if (goop1.kind == RANGE_QUOTED) {
         goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -506,64 +545,70 @@
                         startOpen ? null : discardEscapeChar(goop1.image),
                         endOpen ? null : discardEscapeChar(goop2.image), startInc, endInc);
       break;
-    case QUOTED:
+      }
+    case QUOTED:{
       term = jj_consume_token(QUOTED);
-      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
+      switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
       case CARAT:
-      case FUZZY_SLOP:
-        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-        case CARAT:
+      case FUZZY_SLOP:{
+        switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+        case CARAT:{
           jj_consume_token(CARAT);
           boost = jj_consume_token(NUMBER);
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case FUZZY_SLOP:
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case FUZZY_SLOP:{
             fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                                        fuzzy=true;
+fuzzy=true;
             break;
+            }
           default:
             jj_la1[22] = jj_gen;
             ;
           }
           break;
-        case FUZZY_SLOP:
+          }
+        case FUZZY_SLOP:{
           fuzzySlop = jj_consume_token(FUZZY_SLOP);
-                                 fuzzy=true;
-          switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
-          case CARAT:
+fuzzy=true;
+          switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) {
+          case CARAT:{
             jj_consume_token(CARAT);
             boost = jj_consume_token(NUMBER);
             break;
+            }
           default:
             jj_la1[23] = jj_gen;
             ;
           }
           break;
+          }
         default:
           jj_la1[24] = jj_gen;
           jj_consume_token(-1);
           throw new ParseException();
         }
         break;
+        }
       default:
         jj_la1[25] = jj_gen;
         ;
       }
-      q = handleQuotedTerm(getField(field), term, fuzzySlop);
+q = handleQuotedTerm(getField(field), term, fuzzySlop);
       break;
+      }
     default:
       jj_la1[26] = jj_gen;
       jj_consume_token(-1);
       throw new ParseException();
     }
-    {if (true) return handleBoost(q, boost);}
+{if ("" != null) return handleBoost(q, boost);}
     throw new Error("Missing return statement in function");
-  }
+}
 
-  final public void MultiTerm(String field, List<BooleanClause> clauses) throws ParseException, SyntaxError {
-  Token text;
+  final public void MultiTerm(String field, List<BooleanClause> clauses) throws ParseException, SyntaxError {Token text;
   List<String> terms = null;
     text = jj_consume_token(TERM);
-    if (splitOnWhitespace) {
+if (splitOnWhitespace) {
       Query q = getFieldQuery(getField(field), discardEscapeChar(text.image), false, true);
       addClause(clauses, CONJ_NONE, MOD_NONE, q);
     } else {
@@ -579,7 +624,7 @@
     label_2:
     while (true) {
       text = jj_consume_token(TERM);
-      if (splitOnWhitespace) {
+if (splitOnWhitespace) {
         Query q = getFieldQuery(getField(field), discardEscapeChar(text.image), false, true);
         addClause(clauses, CONJ_NONE, MOD_NONE, q);
       } else {
@@ -591,38 +636,43 @@
         break label_2;
       }
     }
-    if (splitOnWhitespace == false) {
+if (splitOnWhitespace == false) {
       Query q = getFieldQuery(getField(field), terms, true);
       addMultiTermClause(clauses, q);
     }
-  }
+}
 
-  private boolean jj_2_1(int xla) {
+  private boolean jj_2_1(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_1(); }
+    try { return (!jj_3_1()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(0, xla); }
   }
 
-  private boolean jj_2_2(int xla) {
+  private boolean jj_2_2(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_2(); }
+    try { return (!jj_3_2()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(1, xla); }
   }
 
-  private boolean jj_2_3(int xla) {
+  private boolean jj_2_3(int xla)
+ {
     jj_la = xla; jj_lastpos = jj_scanpos = token;
-    try { return !jj_3_3(); }
+    try { return (!jj_3_3()); }
     catch(LookaheadSuccess ls) { return true; }
     finally { jj_save(2, xla); }
   }
 
-  private boolean jj_3R_6() {
+  private boolean jj_3R_6()
+ {
     return false;
   }
 
-  private boolean jj_3R_3() {
+  private boolean jj_3R_3()
+ {
     if (jj_scan_token(TERM)) return true;
     jj_lookingAhead = true;
     jj_semLA = getToken(1).kind == TERM && allowedPostMultiTerm(getToken(2).kind);
@@ -637,7 +687,8 @@
     return false;
   }
 
-  private boolean jj_3_3() {
+  private boolean jj_3_3()
+ {
     Token xsp;
     xsp = jj_scanpos;
     if (jj_3R_4()) {
@@ -647,29 +698,34 @@
     return false;
   }
 
-  private boolean jj_3_2() {
+  private boolean jj_3_2()
+ {
     if (jj_3R_3()) return true;
     return false;
   }
 
-  private boolean jj_3R_5() {
+  private boolean jj_3R_5()
+ {
     if (jj_scan_token(STAR)) return true;
     if (jj_scan_token(COLON)) return true;
     return false;
   }
 
-  private boolean jj_3R_7() {
+  private boolean jj_3R_7()
+ {
     if (jj_scan_token(TERM)) return true;
     return false;
   }
 
-  private boolean jj_3R_4() {
+  private boolean jj_3R_4()
+ {
     if (jj_scan_token(TERM)) return true;
     if (jj_scan_token(COLON)) return true;
     return false;
   }
 
-  private boolean jj_3_1() {
+  private boolean jj_3_1()
+ {
     if (jj_3R_3()) return true;
     return false;
   }
@@ -691,132 +747,133 @@
   static private int[] jj_la1_0;
   static private int[] jj_la1_1;
   static {
-      jj_la1_init_0();
-      jj_la1_init_1();
-   }
-   private static void jj_la1_init_0() {
-      jj_la1_0 = new int[] {0x6000,0x6000,0x38000,0x38000,0xfb4f8000,0xfb4fe000,0xfb4fe000,0x2400000,0x800000,0x800000,0x800000,0xfb4c0000,0x3a440000,0x4000000,0x800000,0x4800000,0x4800000,0xc0000000,0x0,0x0,0x0,0x800000,0x4000000,0x800000,0x4800000,0x4800000,0xfb440000,};
-   }
-   private static void jj_la1_init_1() {
-      jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x7,0x7,0x7,0x0,0x0,0x0,0x0,0x7,0x4,0x0,0x0,0x0,0x0,0x0,0xc8,0xc8,0x30,0x0,0x0,0x0,0x0,0x0,0x4,};
-   }
+       jj_la1_init_0();
+       jj_la1_init_1();
+    }
+    private static void jj_la1_init_0() {
+       jj_la1_0 = new int[] {0x6000,0x6000,0x38000,0x38000,0xfb4f8000,0xfb4fe000,0xfb4fe000,0x2400000,0x800000,0x800000,0x800000,0xfb4c0000,0x3a440000,0x4000000,0x800000,0x4800000,0x4800000,0xc0000000,0x0,0x0,0x0,0x800000,0x4000000,0x800000,0x4800000,0x4800000,0xfb440000,};
+    }
+    private static void jj_la1_init_1() {
+       jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x7,0x7,0x7,0x0,0x0,0x0,0x0,0x7,0x4,0x0,0x0,0x0,0x0,0x0,0xc8,0xc8,0x30,0x0,0x0,0x0,0x0,0x0,0x4,};
+    }
   final private JJCalls[] jj_2_rtns = new JJCalls[3];
   private boolean jj_rescan = false;
   private int jj_gc = 0;
 
   /** Constructor with user supplied CharStream. */
   protected QueryParser(CharStream stream) {
-    token_source = new QueryParserTokenManager(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 27; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = new QueryParserTokenManager(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 27; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(CharStream stream) {
-    token_source.ReInit(stream);
-    token = new Token();
-    jj_ntk = -1;
-    jj_lookingAhead = false;
-    jj_gen = 0;
-    for (int i = 0; i < 27; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source.ReInit(stream);
+     token = new Token();
+     jj_ntk = -1;
+     jj_lookingAhead = false;
+     jj_gen = 0;
+     for (int i = 0; i < 27; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Constructor with generated Token Manager. */
   protected QueryParser(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 27; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 27; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   /** Reinitialise. */
   public void ReInit(QueryParserTokenManager tm) {
-    token_source = tm;
-    token = new Token();
-    jj_ntk = -1;
-    jj_gen = 0;
-    for (int i = 0; i < 27; i++) jj_la1[i] = -1;
-    for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
+     token_source = tm;
+     token = new Token();
+     jj_ntk = -1;
+     jj_gen = 0;
+     for (int i = 0; i < 27; i++) jj_la1[i] = -1;
+     for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
   }
 
   private Token jj_consume_token(int kind) throws ParseException {
-    Token oldToken;
-    if ((oldToken = token).next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    if (token.kind == kind) {
-      jj_gen++;
-      if (++jj_gc > 100) {
-        jj_gc = 0;
-        for (int i = 0; i < jj_2_rtns.length; i++) {
-          JJCalls c = jj_2_rtns[i];
-          while (c != null) {
-            if (c.gen < jj_gen) c.first = null;
-            c = c.next;
-          }
-        }
-      }
-      return token;
-    }
-    token = oldToken;
-    jj_kind = kind;
-    throw generateParseException();
+     Token oldToken;
+     if ((oldToken = token).next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     if (token.kind == kind) {
+       jj_gen++;
+       if (++jj_gc > 100) {
+         jj_gc = 0;
+         for (int i = 0; i < jj_2_rtns.length; i++) {
+           JJCalls c = jj_2_rtns[i];
+           while (c != null) {
+             if (c.gen < jj_gen) c.first = null;
+             c = c.next;
+           }
+         }
+       }
+       return token;
+     }
+     token = oldToken;
+     jj_kind = kind;
+     throw generateParseException();
   }
 
+  @SuppressWarnings("serial")
   static private final class LookaheadSuccess extends java.lang.Error { }
   static final private LookaheadSuccess jj_ls = new LookaheadSuccess();
   private boolean jj_scan_token(int kind) {
-    if (jj_scanpos == jj_lastpos) {
-      jj_la--;
-      if (jj_scanpos.next == null) {
-        jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
-      } else {
-        jj_lastpos = jj_scanpos = jj_scanpos.next;
-      }
-    } else {
-      jj_scanpos = jj_scanpos.next;
-    }
-    if (jj_rescan) {
-      int i = 0; Token tok = token;
-      while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
-      if (tok != null) jj_add_error_token(kind, i);
-    }
-    if (jj_scanpos.kind != kind) return true;
-    if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
-    return false;
+     if (jj_scanpos == jj_lastpos) {
+       jj_la--;
+       if (jj_scanpos.next == null) {
+         jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
+       } else {
+         jj_lastpos = jj_scanpos = jj_scanpos.next;
+       }
+     } else {
+       jj_scanpos = jj_scanpos.next;
+     }
+     if (jj_rescan) {
+       int i = 0; Token tok = token;
+       while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
+       if (tok != null) jj_add_error_token(kind, i);
+     }
+     if (jj_scanpos.kind != kind) return true;
+     if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
+     return false;
   }
 
 
 /** Get the next Token. */
   final public Token getNextToken() {
-    if (token.next != null) token = token.next;
-    else token = token.next = token_source.getNextToken();
-    jj_ntk = -1;
-    jj_gen++;
-    return token;
+     if (token.next != null) token = token.next;
+     else token = token.next = token_source.getNextToken();
+     jj_ntk = -1;
+     jj_gen++;
+     return token;
   }
 
 /** Get the specific Token. */
   final public Token getToken(int index) {
-    Token t = jj_lookingAhead ? jj_scanpos : token;
-    for (int i = 0; i < index; i++) {
-      if (t.next != null) t = t.next;
-      else t = t.next = token_source.getNextToken();
-    }
-    return t;
+     Token t = jj_lookingAhead ? jj_scanpos : token;
+     for (int i = 0; i < index; i++) {
+       if (t.next != null) t = t.next;
+       else t = t.next = token_source.getNextToken();
+     }
+     return t;
   }
 
-  private int jj_ntk() {
-    if ((jj_nt=token.next) == null)
-      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
-    else
-      return (jj_ntk = jj_nt.kind);
+  private int jj_ntk_f() {
+     if ((jj_nt=token.next) == null)
+       return (jj_ntk = (token.next=token_source.getNextToken()).kind);
+     else
+       return (jj_ntk = jj_nt.kind);
   }
 
   private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
@@ -826,65 +883,86 @@
   private int jj_endpos;
 
   private void jj_add_error_token(int kind, int pos) {
-    if (pos >= 100) return;
-    if (pos == jj_endpos + 1) {
-      jj_lasttokens[jj_endpos++] = kind;
-    } else if (jj_endpos != 0) {
-      jj_expentry = new int[jj_endpos];
-      for (int i = 0; i < jj_endpos; i++) {
-        jj_expentry[i] = jj_lasttokens[i];
-      }
-      jj_entries_loop: for (java.util.Iterator<?> it = jj_expentries.iterator(); it.hasNext();) {
-        int[] oldentry = (int[])(it.next());
-        if (oldentry.length == jj_expentry.length) {
-          for (int i = 0; i < jj_expentry.length; i++) {
-            if (oldentry[i] != jj_expentry[i]) {
-              continue jj_entries_loop;
-            }
-          }
-          jj_expentries.add(jj_expentry);
-          break jj_entries_loop;
-        }
-      }
-      if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
-    }
+     if (pos >= 100) {
+        return;
+     }
+
+     if (pos == jj_endpos + 1) {
+       jj_lasttokens[jj_endpos++] = kind;
+     } else if (jj_endpos != 0) {
+       jj_expentry = new int[jj_endpos];
+
+       for (int i = 0; i < jj_endpos; i++) {
+         jj_expentry[i] = jj_lasttokens[i];
+       }
+
+       for (int[] oldentry : jj_expentries) {
+         if (oldentry.length == jj_expentry.length) {
+           boolean isMatched = true;
+
+           for (int i = 0; i < jj_expentry.length; i++) {
+             if (oldentry[i] != jj_expentry[i]) {
+               isMatched = false;
+               break;
+             }
+
+           }
+           if (isMatched) {
+             jj_expentries.add(jj_expentry);
+             break;
+           }
+         }
+       }
+
+       if (pos != 0) {
+         jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+       }
+     }
   }
 
   /** Generate ParseException. */
   public ParseException generateParseException() {
-    jj_expentries.clear();
-    boolean[] la1tokens = new boolean[40];
-    if (jj_kind >= 0) {
-      la1tokens[jj_kind] = true;
-      jj_kind = -1;
-    }
-    for (int i = 0; i < 27; i++) {
-      if (jj_la1[i] == jj_gen) {
-        for (int j = 0; j < 32; j++) {
-          if ((jj_la1_0[i] & (1<<j)) != 0) {
-            la1tokens[j] = true;
-          }
-          if ((jj_la1_1[i] & (1<<j)) != 0) {
-            la1tokens[32+j] = true;
-          }
-        }
-      }
-    }
-    for (int i = 0; i < 40; i++) {
-      if (la1tokens[i]) {
-        jj_expentry = new int[1];
-        jj_expentry[0] = i;
-        jj_expentries.add(jj_expentry);
-      }
-    }
-    jj_endpos = 0;
-    jj_rescan_token();
-    jj_add_error_token(0, 0);
-    int[][] exptokseq = new int[jj_expentries.size()][];
-    for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = jj_expentries.get(i);
-    }
-    return new ParseException(token, exptokseq, tokenImage);
+     jj_expentries.clear();
+     boolean[] la1tokens = new boolean[40];
+     if (jj_kind >= 0) {
+       la1tokens[jj_kind] = true;
+       jj_kind = -1;
+     }
+     for (int i = 0; i < 27; i++) {
+       if (jj_la1[i] == jj_gen) {
+         for (int j = 0; j < 32; j++) {
+           if ((jj_la1_0[i] & (1<<j)) != 0) {
+             la1tokens[j] = true;
+           }
+           if ((jj_la1_1[i] & (1<<j)) != 0) {
+             la1tokens[32+j] = true;
+           }
+         }
+       }
+     }
+     for (int i = 0; i < 40; i++) {
+       if (la1tokens[i]) {
+         jj_expentry = new int[1];
+         jj_expentry[0] = i;
+         jj_expentries.add(jj_expentry);
+       }
+     }
+     jj_endpos = 0;
+     jj_rescan_token();
+     jj_add_error_token(0, 0);
+     int[][] exptokseq = new int[jj_expentries.size()][];
+     for (int i = 0; i < jj_expentries.size(); i++) {
+       exptokseq[i] = jj_expentries.get(i);
+     }
+     return new ParseException(token, exptokseq, tokenImage);
+  }
+
+  private int trace_indent = 0;
+  private boolean trace_enabled;
+
+/** Trace enabled. */
+  final public boolean trace_enabled() {
+     return trace_enabled;
   }
 
   /** Enable tracing. */
@@ -896,40 +974,45 @@
   }
 
   private void jj_rescan_token() {
-    jj_rescan = true;
-    for (int i = 0; i < 3; i++) {
-    try {
-      JJCalls p = jj_2_rtns[i];
-      do {
-        if (p.gen > jj_gen) {
-          jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
-          switch (i) {
-            case 0: jj_3_1(); break;
-            case 1: jj_3_2(); break;
-            case 2: jj_3_3(); break;
-          }
-        }
-        p = p.next;
-      } while (p != null);
-      } catch(LookaheadSuccess ls) { }
-    }
-    jj_rescan = false;
+     jj_rescan = true;
+     for (int i = 0; i < 3; i++) {
+       try {
+         JJCalls p = jj_2_rtns[i];
+
+         do {
+           if (p.gen > jj_gen) {
+             jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+             switch (i) {
+               case 0: jj_3_1(); break;
+               case 1: jj_3_2(); break;
+               case 2: jj_3_3(); break;
+             }
+           }
+           p = p.next;
+         } while (p != null);
+
+         } catch(LookaheadSuccess ls) { }
+     }
+     jj_rescan = false;
   }
 
   private void jj_save(int index, int xla) {
-    JJCalls p = jj_2_rtns[index];
-    while (p.gen > jj_gen) {
-      if (p.next == null) { p = p.next = new JJCalls(); break; }
-      p = p.next;
-    }
-    p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+     JJCalls p = jj_2_rtns[index];
+     while (p.gen > jj_gen) {
+       if (p.next == null) { p = p.next = new JJCalls(); break; }
+       p = p.next;
+     }
+
+     p.gen = jj_gen + xla - jj_la; 
+     p.first = token;
+     p.arg = xla;
   }
 
   static final class JJCalls {
-    int gen;
-    Token first;
-    int arg;
-    JJCalls next;
+     int gen;
+     Token first;
+     int arg;
+     JJCalls next;
   }
 
 }
diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParserTokenManager.java b/solr/core/src/java/org/apache/solr/parser/QueryParserTokenManager.java
index cf0c02f..c717057 100644
--- a/solr/core/src/java/org/apache/solr/parser/QueryParserTokenManager.java
+++ b/solr/core/src/java/org/apache/solr/parser/QueryParserTokenManager.java
@@ -1,13 +1,27 @@
+/* QueryParserTokenManager.java */
 /* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
 package org.apache.solr.parser;
+
+
+
+
+
+
+
+
+
+
+
+
 /** Token Manager. */
-public class QueryParserTokenManager implements QueryParserConstants
-{
+public class QueryParserTokenManager implements QueryParserConstants {
   int commentNestingDepth ;
 
-  
-private final int jjStopStringLiteralDfa_3(int pos, long active0)
-{
+  /** Debug output. */
+  // (debugStream omitted).
+  /** Set debug output. */
+  // (setDebugStream omitted).
+private final int jjStopStringLiteralDfa_3(int pos, long active0){
    switch (pos)
    {
       case 0:
@@ -69,8 +83,7 @@
          return -1;
    }
 }
-private final int jjStartNfa_3(int pos, long active0)
-{
+private final int jjStartNfa_3(int pos, long active0){
    return jjMoveNfa_3(jjStopStringLiteralDfa_3(pos, active0), pos + 1);
 }
 private int jjStopAtPos(int pos, int kind)
@@ -79,8 +92,7 @@
    jjmatchedPos = pos;
    return pos + 1;
 }
-private int jjMoveStringLiteralDfa0_3()
-{
+private int jjMoveStringLiteralDfa0_3(){
    switch(curChar)
    {
       case 40:
@@ -109,8 +121,7 @@
          return jjMoveNfa_3(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_3(long active0)
-{
+private int jjMoveStringLiteralDfa1_3(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_3(0, active0);
@@ -129,8 +140,7 @@
    }
    return jjStartNfa_3(0, active0);
 }
-private int jjMoveStringLiteralDfa2_3(long old0, long active0)
-{
+private int jjMoveStringLiteralDfa2_3(long old0, long active0){
    if (((active0 &= old0)) == 0L)
       return jjStartNfa_3(0, old0);
    try { curChar = input_stream.readChar(); }
@@ -147,8 +157,7 @@
    }
    return jjStartNfa_3(1, active0);
 }
-private int jjMoveStringLiteralDfa3_3(long old0, long active0)
-{
+private int jjMoveStringLiteralDfa3_3(long old0, long active0){
    if (((active0 &= old0)) == 0L)
       return jjStartNfa_3(1, old0);
    try { curChar = input_stream.readChar(); }
@@ -165,8 +174,7 @@
    }
    return jjStartNfa_3(2, active0);
 }
-private int jjMoveStringLiteralDfa4_3(long old0, long active0)
-{
+private int jjMoveStringLiteralDfa4_3(long old0, long active0){
    if (((active0 &= old0)) == 0L)
       return jjStartNfa_3(2, old0);
    try { curChar = input_stream.readChar(); }
@@ -183,8 +191,7 @@
    }
    return jjStartNfa_3(3, active0);
 }
-private int jjMoveStringLiteralDfa5_3(long old0, long active0)
-{
+private int jjMoveStringLiteralDfa5_3(long old0, long active0){
    if (((active0 &= old0)) == 0L)
       return jjStartNfa_3(3, old0);
    try { curChar = input_stream.readChar(); }
@@ -201,8 +208,7 @@
    }
    return jjStartNfa_3(4, active0);
 }
-private int jjMoveStringLiteralDfa6_3(long old0, long active0)
-{
+private int jjMoveStringLiteralDfa6_3(long old0, long active0){
    if (((active0 &= old0)) == 0L)
       return jjStartNfa_3(4, old0);
    try { curChar = input_stream.readChar(); }
@@ -265,18 +271,18 @@
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 31:
                   if ((0xffff7bffffffffffL & l) != 0L)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 0:
                   if ((0xfbff54f8ffffd9ffL & l) != 0L)
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   else if ((0x100002600L & l) != 0L)
                   {
@@ -286,14 +292,14 @@
                   else if ((0x280200000000L & l) != 0L)
                      jjstateSet[jjnewStateCnt++] = 15;
                   else if (curChar == 47)
-                     jjAddStates(3, 4);
+                     { jjAddStates(3, 4); }
                   else if (curChar == 34)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   if ((0x7bff50f8ffffd9ffL & l) != 0L)
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddStates(8, 12);
+                     { jjCheckNAddStates(8, 12); }
                   }
                   else if (curChar == 42)
                   {
@@ -313,10 +319,10 @@
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   if ((0x7bfff8faffffd9ffL & l) != 0L)
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   else if (curChar == 42)
                   {
                      if (kind > 27)
@@ -326,7 +332,7 @@
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddTwoStates(56, 57);
+                     { jjCheckNAddTwoStates(56, 57); }
                   }
                   break;
                case 4:
@@ -351,14 +357,14 @@
                   break;
                case 16:
                   if (curChar == 34)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   break;
                case 17:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   break;
                case 19:
-                  jjCheckNAddStates(5, 7);
+                  { jjCheckNAddStates(5, 7); }
                   break;
                case 20:
                   if (curChar == 34 && kind > 24)
@@ -369,18 +375,18 @@
                      break;
                   if (kind > 26)
                      kind = 26;
-                  jjAddStates(16, 17);
+                  { jjAddStates(16, 17); }
                   break;
                case 23:
                   if (curChar == 46)
-                     jjCheckNAdd(24);
+                     { jjCheckNAdd(24); }
                   break;
                case 24:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 26)
                      kind = 26;
-                  jjCheckNAdd(24);
+                  { jjCheckNAdd(24); }
                   break;
                case 25:
                   if (curChar == 42 && kind > 27)
@@ -391,24 +397,24 @@
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 29:
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 30:
                   if (curChar == 47)
-                     jjAddStates(3, 4);
+                     { jjAddStates(3, 4); }
                   break;
                case 32:
                   if ((0xffff7fffffffffffL & l) != 0L)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 33:
                   if (curChar == 47)
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 35:
                   if (curChar == 47 && kind > 29)
@@ -416,34 +422,34 @@
                   break;
                case 37:
                   if (curChar == 33)
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 38:
                   if ((0x100002600L & l) != 0L)
-                     jjCheckNAddTwoStates(38, 39);
+                     { jjCheckNAddTwoStates(38, 39); }
                   break;
                case 39:
                   if ((0xdfffffffffffffffL & l) != 0L)
-                     jjCheckNAddStates(21, 24);
+                     { jjCheckNAddStates(21, 24); }
                   break;
                case 40:
                   if (curChar == 61)
-                     jjCheckNAddStates(25, 30);
+                     { jjCheckNAddStates(25, 30); }
                   break;
                case 41:
                   if (curChar == 34)
-                     jjCheckNAddStates(31, 33);
+                     { jjCheckNAddStates(31, 33); }
                   break;
                case 42:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(31, 33);
+                     { jjCheckNAddStates(31, 33); }
                   break;
                case 44:
-                  jjCheckNAddStates(31, 33);
+                  { jjCheckNAddStates(31, 33); }
                   break;
                case 45:
                   if (curChar == 34)
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 48:
                   if ((0xfffffdfefffff9ffL & l) == 0L)
@@ -454,48 +460,48 @@
                   break;
                case 49:
                   if (curChar == 39)
-                     jjCheckNAddStates(34, 36);
+                     { jjCheckNAddStates(34, 36); }
                   break;
                case 50:
                   if ((0xffffff7fffffffffL & l) != 0L)
-                     jjCheckNAddStates(34, 36);
+                     { jjCheckNAddStates(34, 36); }
                   break;
                case 52:
-                  jjCheckNAddStates(34, 36);
+                  { jjCheckNAddStates(34, 36); }
                   break;
                case 53:
                   if (curChar == 39)
-                     jjCheckNAddStates(18, 20);
+                     { jjCheckNAddStates(18, 20); }
                   break;
                case 54:
                   if ((0xfffffffeffffffffL & l) != 0L)
-                     jjCheckNAddStates(37, 40);
+                     { jjCheckNAddStates(37, 40); }
                   break;
                case 55:
                   if ((0x7bff50f8ffffd9ffL & l) == 0L)
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddStates(8, 12);
+                  { jjCheckNAddStates(8, 12); }
                   break;
                case 56:
                   if ((0x7bfff8faffffd9ffL & l) == 0L)
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 58:
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 59:
                   if ((0x7bfff8faffffd9ffL & l) != 0L)
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   break;
                case 61:
-                  jjCheckNAddStates(13, 15);
+                  { jjCheckNAddStates(13, 15); }
                   break;
                default : break;
             }
@@ -513,25 +519,25 @@
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAddTwoStates(29, 29);
+                     { jjCheckNAddTwoStates(29, 29); }
                   break;
                case 31:
-                  jjCheckNAddStates(0, 2);
+                  { jjCheckNAddStates(0, 2); }
                   if (curChar == 92)
-                     jjCheckNAdd(33);
+                     { jjCheckNAdd(33); }
                   break;
                case 0:
                   if ((0x97ffffff87ffffffL & l) != 0L)
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddStates(8, 12);
+                     { jjCheckNAddStates(8, 12); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAddStates(41, 43);
+                     { jjCheckNAddStates(41, 43); }
                   else if (curChar == 123)
                      jjstateSet[jjnewStateCnt++] = 37;
                   else if (curChar == 126)
@@ -544,7 +550,7 @@
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   if (curChar == 78)
                      jjstateSet[jjnewStateCnt++] = 11;
@@ -560,22 +566,22 @@
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAddTwoStates(58, 58);
+                     { jjCheckNAddTwoStates(58, 58); }
                   if ((0x97ffffff87ffffffL & l) != 0L)
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   else if (curChar == 92)
-                     jjCheckNAddTwoStates(61, 61);
+                     { jjCheckNAddTwoStates(61, 61); }
                   if ((0x97ffffff87ffffffL & l) != 0L)
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddTwoStates(56, 57);
+                     { jjCheckNAddTwoStates(56, 57); }
                   }
                   else if (curChar == 92)
-                     jjCheckNAddTwoStates(29, 29);
+                     { jjCheckNAddTwoStates(29, 29); }
                   break;
                case 1:
                   if (curChar == 68 && kind > 13)
@@ -619,14 +625,14 @@
                   break;
                case 17:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   break;
                case 18:
                   if (curChar == 92)
                      jjstateSet[jjnewStateCnt++] = 19;
                   break;
                case 19:
-                  jjCheckNAddStates(5, 7);
+                  { jjCheckNAddStates(5, 7); }
                   break;
                case 21:
                   if (curChar != 126)
@@ -640,56 +646,56 @@
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 27:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 28:
                   if (curChar == 92)
-                     jjCheckNAddTwoStates(29, 29);
+                     { jjCheckNAddTwoStates(29, 29); }
                   break;
                case 29:
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 32:
-                  jjCheckNAddStates(0, 2);
+                  { jjCheckNAddStates(0, 2); }
                   break;
                case 34:
                   if (curChar == 92)
-                     jjCheckNAdd(33);
+                     { jjCheckNAdd(33); }
                   break;
                case 36:
                   if (curChar == 92)
-                     jjCheckNAdd(33);
+                     { jjCheckNAdd(33); }
                   break;
                case 39:
                   if ((0xdfffffffffffffffL & l) != 0L)
-                     jjCheckNAddStates(21, 24);
+                     { jjCheckNAddStates(21, 24); }
                   break;
                case 42:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(31, 33);
+                     { jjCheckNAddStates(31, 33); }
                   break;
                case 43:
                   if (curChar == 92)
                      jjstateSet[jjnewStateCnt++] = 44;
                   break;
                case 44:
-                  jjCheckNAddStates(31, 33);
+                  { jjCheckNAddStates(31, 33); }
                   break;
                case 46:
                   if (curChar != 125)
                      break;
                   if (kind > 32)
                      kind = 32;
-                  jjCheckNAddTwoStates(47, 48);
+                  { jjCheckNAddTwoStates(47, 48); }
                   break;
                case 47:
                   if (curChar == 123)
@@ -700,60 +706,60 @@
                      break;
                   if (kind > 32)
                      kind = 32;
-                  jjCheckNAdd(48);
+                  { jjCheckNAdd(48); }
                   break;
                case 50:
                   if ((0xffffffffefffffffL & l) != 0L)
-                     jjCheckNAddStates(34, 36);
+                     { jjCheckNAddStates(34, 36); }
                   break;
                case 51:
                   if (curChar == 92)
                      jjstateSet[jjnewStateCnt++] = 52;
                   break;
                case 52:
-                  jjCheckNAddStates(34, 36);
+                  { jjCheckNAddStates(34, 36); }
                   break;
                case 54:
                   if ((0xdfffffffffffffffL & l) != 0L)
-                     jjCheckNAddStates(37, 40);
+                     { jjCheckNAddStates(37, 40); }
                   break;
                case 55:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddStates(8, 12);
+                  { jjCheckNAddStates(8, 12); }
                   break;
                case 56:
                   if ((0x97ffffff87ffffffL & l) == 0L)
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 57:
                   if (curChar == 92)
-                     jjCheckNAddTwoStates(58, 58);
+                     { jjCheckNAddTwoStates(58, 58); }
                   break;
                case 58:
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 59:
                   if ((0x97ffffff87ffffffL & l) != 0L)
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   break;
                case 60:
                   if (curChar == 92)
-                     jjCheckNAddTwoStates(61, 61);
+                     { jjCheckNAddTwoStates(61, 61); }
                   break;
                case 61:
-                  jjCheckNAddStates(13, 15);
+                  { jjCheckNAddStates(13, 15); }
                   break;
                case 62:
                   if (curChar == 92)
-                     jjCheckNAddStates(41, 43);
+                     { jjCheckNAddStates(41, 43); }
                   break;
                default : break;
             }
@@ -761,7 +767,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -776,12 +782,12 @@
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 31:
                case 32:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(0, 2);
+                     { jjCheckNAddStates(0, 2); }
                   break;
                case 0:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
@@ -793,13 +799,13 @@
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddStates(8, 12);
+                     { jjCheckNAddStates(8, 12); }
                   }
                   break;
                case 63:
@@ -807,15 +813,15 @@
                   {
                      if (kind > 25)
                         kind = 25;
-                     jjCheckNAddTwoStates(56, 57);
+                     { jjCheckNAddTwoStates(56, 57); }
                   }
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
                   {
                      if (kind > 28)
                         kind = 28;
-                     jjCheckNAddTwoStates(27, 28);
+                     { jjCheckNAddTwoStates(27, 28); }
                   }
                   break;
                case 15:
@@ -825,34 +831,34 @@
                case 17:
                case 19:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(5, 7);
+                     { jjCheckNAddStates(5, 7); }
                   break;
                case 26:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 29:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 28)
                      kind = 28;
-                  jjCheckNAddTwoStates(27, 28);
+                  { jjCheckNAddTwoStates(27, 28); }
                   break;
                case 38:
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddTwoStates(38, 39);
+                     { jjCheckNAddTwoStates(38, 39); }
                   break;
                case 39:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(21, 24);
+                     { jjCheckNAddStates(21, 24); }
                   break;
                case 42:
                case 44:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(31, 33);
+                     { jjCheckNAddStates(31, 33); }
                   break;
                case 48:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
@@ -864,42 +870,42 @@
                case 50:
                case 52:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(34, 36);
+                     { jjCheckNAddStates(34, 36); }
                   break;
                case 54:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(37, 40);
+                     { jjCheckNAddStates(37, 40); }
                   break;
                case 55:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddStates(8, 12);
+                  { jjCheckNAddStates(8, 12); }
                   break;
                case 56:
                   if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 58:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 25)
                      kind = 25;
-                  jjCheckNAddTwoStates(56, 57);
+                  { jjCheckNAddTwoStates(56, 57); }
                   break;
                case 59:
                   if (jjCanMove_2(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   break;
                case 61:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjCheckNAddStates(13, 15);
+                     { jjCheckNAddStates(13, 15); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -916,20 +922,17 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
-private final int jjStopStringLiteralDfa_2(int pos, long active0)
-{
+private final int jjStopStringLiteralDfa_2(int pos, long active0){
    switch (pos)
    {
       default :
          return -1;
    }
 }
-private final int jjStartNfa_2(int pos, long active0)
-{
+private final int jjStartNfa_2(int pos, long active0){
    return jjMoveNfa_2(jjStopStringLiteralDfa_2(pos, active0), pos + 1);
 }
-private int jjMoveStringLiteralDfa0_2()
-{
+private int jjMoveStringLiteralDfa0_2(){
    switch(curChar)
    {
       case 42:
@@ -940,8 +943,7 @@
          return jjMoveNfa_2(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_2(long active0)
-{
+private int jjMoveStringLiteralDfa1_2(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_2(0, active0);
@@ -1001,7 +1003,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -1014,7 +1016,7 @@
                   if (jjCanMove_0(hiByte, i1, i2, l1, l2) && kind > 8)
                      kind = 8;
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -1058,34 +1060,34 @@
                   {
                      if (kind > 34)
                         kind = 34;
-                     jjCheckNAddTwoStates(2, 3);
+                     { jjCheckNAddTwoStates(2, 3); }
                   }
                   else if (curChar == 45)
-                     jjCheckNAdd(2);
+                     { jjCheckNAdd(2); }
                   else if (curChar == 61)
-                     jjCheckNAddTwoStates(1, 2);
+                     { jjCheckNAddTwoStates(1, 2); }
                   break;
                case 1:
                   if (curChar == 45)
-                     jjCheckNAdd(2);
+                     { jjCheckNAdd(2); }
                   break;
                case 2:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 34)
                      kind = 34;
-                  jjCheckNAddTwoStates(2, 3);
+                  { jjCheckNAddTwoStates(2, 3); }
                   break;
                case 3:
                   if (curChar == 46)
-                     jjCheckNAdd(4);
+                     { jjCheckNAdd(4); }
                   break;
                case 4:
                   if ((0x3ff000000000000L & l) == 0L)
                      break;
                   if (kind > 34)
                      kind = 34;
-                  jjCheckNAdd(4);
+                  { jjCheckNAdd(4); }
                   break;
                default : break;
             }
@@ -1104,7 +1106,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -1113,7 +1115,7 @@
          {
             switch(jjstateSet[--i])
             {
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -1130,8 +1132,7 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
-private final int jjStopStringLiteralDfa_1(int pos, long active0)
-{
+private final int jjStopStringLiteralDfa_1(int pos, long active0){
    switch (pos)
    {
       case 0:
@@ -1145,12 +1146,10 @@
          return -1;
    }
 }
-private final int jjStartNfa_1(int pos, long active0)
-{
+private final int jjStartNfa_1(int pos, long active0){
    return jjMoveNfa_1(jjStopStringLiteralDfa_1(pos, active0), pos + 1);
 }
-private int jjMoveStringLiteralDfa0_1()
-{
+private int jjMoveStringLiteralDfa0_1(){
    switch(curChar)
    {
       case 84:
@@ -1163,8 +1162,7 @@
          return jjMoveNfa_1(0, 0);
    }
 }
-private int jjMoveStringLiteralDfa1_1(long active0)
-{
+private int jjMoveStringLiteralDfa1_1(long active0){
    try { curChar = input_stream.readChar(); }
    catch(java.io.IOException e) {
       jjStopStringLiteralDfa_1(0, active0);
@@ -1212,7 +1210,7 @@
                   {
                      if (kind > 39)
                         kind = 39;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   if ((0x100002600L & l) != 0L)
                   {
@@ -1220,19 +1218,19 @@
                         kind = 12;
                   }
                   else if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 1:
                   if (curChar == 34)
-                     jjCheckNAddTwoStates(2, 4);
+                     { jjCheckNAddTwoStates(2, 4); }
                   break;
                case 2:
                   if ((0xfffffffbffffffffL & l) != 0L)
-                     jjCheckNAddStates(44, 46);
+                     { jjCheckNAddStates(44, 46); }
                   break;
                case 3:
                   if (curChar == 34)
-                     jjCheckNAddStates(44, 46);
+                     { jjCheckNAddStates(44, 46); }
                   break;
                case 5:
                   if (curChar == 34 && kind > 38)
@@ -1243,7 +1241,7 @@
                      break;
                   if (kind > 39)
                      kind = 39;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                default : break;
             }
@@ -1262,10 +1260,10 @@
                      break;
                   if (kind > 39)
                      kind = 39;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
                case 2:
-                  jjAddStates(44, 46);
+                  { jjAddStates(44, 46); }
                   break;
                case 4:
                   if (curChar == 92)
@@ -1277,7 +1275,7 @@
       }
       else
       {
-         int hiByte = curChar >> 8;
+         int hiByte = (curChar >> 8);
          int i1 = hiByte >> 6;
          long l1 = 1L << (hiByte & 077);
          int i2 = (curChar & 0xff) >> 6;
@@ -1296,21 +1294,21 @@
                   {
                      if (kind > 39)
                         kind = 39;
-                     jjCheckNAdd(6);
+                     { jjCheckNAdd(6); }
                   }
                   break;
                case 2:
                   if (jjCanMove_1(hiByte, i1, i2, l1, l2))
-                     jjAddStates(44, 46);
+                     { jjAddStates(44, 46); }
                   break;
                case 6:
                   if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
                      break;
                   if (kind > 39)
                      kind = 39;
-                  jjCheckNAdd(6);
+                  { jjCheckNAdd(6); }
                   break;
-               default : break;
+               default : if (i1 == 0 || l1 == 0 || i2 == 0 ||  l2 == 0) break; else break;
             }
          } while(i != startsAt);
       }
@@ -1327,6 +1325,38 @@
       catch(java.io.IOException e) { return curPos; }
    }
 }
+
+/** Token literal values. */
+public static final String[] jjstrLiteralImages = {
+"", null, null, null, null, null, null, null, null, null, null, null, null, 
+null, null, null, "\53", "\55", null, "\50", "\51", "\72", "\52", "\136", null, null, 
+null, null, null, null, "\133", "\173", null, "\146\151\154\164\145\162\50", null, 
+"\124\117", "\135", "\175", null, null, };
+protected Token jjFillToken()
+{
+   final Token t;
+   final String curTokenImage;
+   final int beginLine;
+   final int endLine;
+   final int beginColumn;
+   final int endColumn;
+   String im = jjstrLiteralImages[jjmatchedKind];
+   curTokenImage = (im == null) ? input_stream.GetImage() : im;
+   beginLine = input_stream.getBeginLine();
+   beginColumn = input_stream.getBeginColumn();
+   endLine = input_stream.getEndLine();
+   endColumn = input_stream.getEndColumn();
+   t = Token.newToken(jjmatchedKind);
+   t.kind = jjmatchedKind;
+   t.image = curTokenImage;
+
+   t.beginLine = beginLine;
+   t.endLine = endLine;
+   t.beginColumn = beginColumn;
+   t.endColumn = endColumn;
+
+   return t;
+}
 static final int[] jjnextStates = {
    32, 34, 35, 31, 36, 17, 18, 20, 56, 59, 25, 60, 57, 59, 25, 60, 
    22, 23, 38, 39, 46, 38, 39, 40, 46, 38, 39, 41, 49, 54, 46, 42, 
@@ -1369,107 +1399,6 @@
    }
 }
 
-/** Token literal values. */
-public static final String[] jjstrLiteralImages = {
-"", null, null, null, null, null, null, null, null, null, null, null, null, 
-null, null, null, "\53", "\55", null, "\50", "\51", "\72", "\52", "\136", null, null, 
-null, null, null, null, "\133", "\173", null, "\146\151\154\164\145\162\50", null, 
-"\124\117", "\135", "\175", null, null, };
-
-/** Lexer state names. */
-public static final String[] lexStateNames = {
-   "Boost",
-   "Range",
-   "COMMENT",
-   "DEFAULT",
-};
-
-/** Lex State array. */
-public static final int[] jjnewLexState = {
-   -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, 
-   -1, -1, -1, -1, -1, 1, 1, -1, -1, 3, -1, 3, 3, -1, -1, 
-};
-static final long[] jjtoToken = {
-   0xffffffe001L, 
-};
-static final long[] jjtoSkip = {
-   0x1f00L, 
-};
-protected CharStream input_stream;
-private final int[] jjrounds = new int[63];
-private final int[] jjstateSet = new int[126];
-private final StringBuilder jjimage = new StringBuilder();
-private StringBuilder image = jjimage;
-private int jjimageLen;
-private int lengthOfMatch;
-protected char curChar;
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream){
-   input_stream = stream;
-}
-
-/** Constructor. */
-public QueryParserTokenManager(CharStream stream, int lexState){
-   this(stream);
-   SwitchTo(lexState);
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream)
-{
-   jjmatchedPos = jjnewStateCnt = 0;
-   curLexState = defaultLexState;
-   input_stream = stream;
-   ReInitRounds();
-}
-private void ReInitRounds()
-{
-   int i;
-   jjround = 0x80000001;
-   for (i = 63; i-- > 0;)
-      jjrounds[i] = 0x80000000;
-}
-
-/** Reinitialise parser. */
-public void ReInit(CharStream stream, int lexState)
-{
-   ReInit(stream);
-   SwitchTo(lexState);
-}
-
-/** Switch to specified lex state. */
-public void SwitchTo(int lexState)
-{
-   if (lexState >= 4 || lexState < 0)
-      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
-   else
-      curLexState = lexState;
-}
-
-protected Token jjFillToken()
-{
-   final Token t;
-   final String curTokenImage;
-   final int beginLine;
-   final int endLine;
-   final int beginColumn;
-   final int endColumn;
-   String im = jjstrLiteralImages[jjmatchedKind];
-   curTokenImage = (im == null) ? input_stream.GetImage() : im;
-   beginLine = input_stream.getBeginLine();
-   beginColumn = input_stream.getBeginColumn();
-   endLine = input_stream.getEndLine();
-   endColumn = input_stream.getEndColumn();
-   t = Token.newToken(jjmatchedKind, curTokenImage);
-
-   t.beginLine = beginLine;
-   t.endLine = endLine;
-   t.beginColumn = beginColumn;
-   t.endColumn = endColumn;
-
-   return t;
-}
-
 int curLexState = 3;
 int defaultLexState = 3;
 int jjnewStateCnt;
@@ -1490,9 +1419,10 @@
    {
       curChar = input_stream.BeginToken();
    }
-   catch(java.io.IOException e)
+   catch(Exception e)
    {
       jjmatchedKind = 0;
+      jjmatchedPos = -1;
       matchedToken = jjFillToken();
       return matchedToken;
    }
@@ -1585,6 +1515,23 @@
          break;
    }
 }
+void MoreLexicalActions()
+{
+   jjimageLen += (lengthOfMatch = jjmatchedPos + 1);
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
+void TokenLexicalActions(Token matchedToken)
+{
+   switch(jjmatchedKind)
+   {
+      default :
+         break;
+   }
+}
 private void jjCheckNAdd(int state)
 {
    if (jjrounds[state] != jjround)
@@ -1612,4 +1559,91 @@
    } while (start++ != end);
 }
 
+    /** Constructor. */
+    public QueryParserTokenManager(CharStream stream){
+
+
+    input_stream = stream;
+  }
+
+  /** Constructor. */
+  public QueryParserTokenManager (CharStream stream, int lexState){
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Reinitialise parser. */
+  
+  public void ReInit(CharStream stream)
+  {
+
+
+    jjmatchedPos =
+    jjnewStateCnt =
+    0;
+    curLexState = defaultLexState;
+    input_stream = stream;
+    ReInitRounds();
+  }
+
+  private void ReInitRounds()
+  {
+    int i;
+    jjround = 0x80000001;
+    for (i = 63; i-- > 0;)
+      jjrounds[i] = 0x80000000;
+  }
+
+  /** Reinitialise parser. */
+  public void ReInit(CharStream stream, int lexState)
+  
+  {
+    ReInit(stream);
+    SwitchTo(lexState);
+  }
+
+  /** Switch to specified lex state. */
+  public void SwitchTo(int lexState)
+  {
+    if (lexState >= 4 || lexState < 0)
+      throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+    else
+      curLexState = lexState;
+  }
+
+
+/** Lexer state names. */
+public static final String[] lexStateNames = {
+   "Boost",
+   "Range",
+   "COMMENT",
+   "DEFAULT",
+};
+
+/** Lex State array. */
+public static final int[] jjnewLexState = {
+   -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, 
+   -1, -1, -1, -1, -1, 1, 1, -1, -1, 3, -1, 3, 3, -1, -1, 
+};
+static final long[] jjtoToken = {
+   0xffffffe001L, 
+};
+static final long[] jjtoSkip = {
+   0x1f00L, 
+};
+static final long[] jjtoSpecial = {
+   0x0L, 
+};
+static final long[] jjtoMore = {
+   0x0L, 
+};
+    protected CharStream  input_stream;
+
+    private final int[] jjrounds = new int[63];
+    private final int[] jjstateSet = new int[2 * 63];
+    private final StringBuilder jjimage = new StringBuilder();
+    private StringBuilder image = jjimage;
+    private int jjimageLen;
+    private int lengthOfMatch;
+    protected int curChar;
 }
diff --git a/solr/core/src/java/org/apache/solr/parser/Token.java b/solr/core/src/java/org/apache/solr/parser/Token.java
index 0d59603..db6ffd0 100644
--- a/solr/core/src/java/org/apache/solr/parser/Token.java
+++ b/solr/core/src/java/org/apache/solr/parser/Token.java
@@ -1,5 +1,5 @@
-/* Generated By:JavaCC: Do not edit this line. Token.java Version 5.0 */
-/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
+/* Generated By:JavaCC: Do not edit this line. Token.java Version 7.0 */
+/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
 package org.apache.solr.parser;
 
 /**
@@ -97,6 +97,7 @@
   /**
    * Returns the image.
    */
+  @Override
   public String toString()
   {
     return image;
@@ -128,4 +129,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=f463ad6fd3205ca07166de02ee86b907 (do not edit this line) */
+/* (filtered)*/
diff --git a/solr/core/src/java/org/apache/solr/parser/TokenMgrError.java b/solr/core/src/java/org/apache/solr/parser/TokenMgrError.java
index ecdcc53..609c572 100644
--- a/solr/core/src/java/org/apache/solr/parser/TokenMgrError.java
+++ b/solr/core/src/java/org/apache/solr/parser/TokenMgrError.java
@@ -1,4 +1,4 @@
-/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 5.0 */
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 7.0 */
 /* JavaCCOptions: */
 package org.apache.solr.parser;
 
@@ -20,22 +20,22 @@
   /**
    * Lexical error occurred.
    */
-  static final int LEXICAL_ERROR = 0;
+  public static final int LEXICAL_ERROR = 0;
 
   /**
    * An attempt was made to create a second instance of a static token manager.
    */
-  static final int STATIC_LEXER_ERROR = 1;
+  public static final int STATIC_LEXER_ERROR = 1;
 
   /**
    * Tried to change to an invalid lexical state.
    */
-  static final int INVALID_LEXICAL_STATE = 2;
+  public static final int INVALID_LEXICAL_STATE = 2;
 
   /**
    * Detected (and bailed out of) an infinite loop in the token manager.
    */
-  static final int LOOP_DETECTED = 3;
+  public static final int LOOP_DETECTED = 3;
 
   /**
    * Indicates the reason why the exception is thrown. It will have
@@ -53,8 +53,6 @@
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
       {
-        case 0 :
-          continue;
         case '\b':
           retval.append("\\b");
           continue;
@@ -104,11 +102,12 @@
    *    curchar     : the offending character
    * Note: You can customize the lexical error message by modifying this method.
    */
-  protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
+  protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) {
+    char curChar1 = (char)curChar;
     return("Lexical error at line " +
           errorLine + ", column " +
           errorColumn + ".  Encountered: " +
-          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
+          (EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + curChar + "), ") +
           "after : \"" + addEscapes(errorAfter) + "\"");
   }
 
@@ -121,6 +120,7 @@
    *
    * from this method for such cases in the release version of your parser.
    */
+  @Override
   public String getMessage() {
     return super.getMessage();
   }
@@ -140,8 +140,8 @@
   }
 
   /** Full Constructor. */
-  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
-    this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
+  public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) {
+    this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=200a46f65c1a0f71a7f037b35f4e934e (do not edit this line) */
+/* (filtered)*/
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index 83dccd5..bd4ae7e 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -54,6 +54,7 @@
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.cloud.SolrClassLoader;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.MapSolrParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -62,7 +63,6 @@
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.common.cloud.SolrClassLoader;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.core.XmlConfigFile;
diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
index 9fd5243..f8c0c4f 100644
--- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
@@ -66,10 +66,10 @@
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.ExecutorUtil;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.rest.schema.FieldTypeXmlAdapter;
-import org.apache.solr.common.util.SolrNamedThreadFactory;
 import org.apache.solr.util.FileUtils;
 import org.apache.solr.util.RTimer;
 import org.apache.zookeeper.CreateMode;
@@ -1343,10 +1343,13 @@
     TokenFilterFactory[] filters = chain.getTokenFilterFactories();
     for (TokenFilterFactory next : filters) {
       if (next instanceof ResourceLoaderAware) {
+        SolrResourceLoader.CURRENT_AWARE.set((ResourceLoaderAware) next);
         try {
           ((ResourceLoaderAware) next).inform(loader);
         } catch (IOException e) {
           throw new SolrException(ErrorCode.SERVER_ERROR, e);
+        } finally {
+          SolrResourceLoader.CURRENT_AWARE.remove();
         }
       }
     }
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java
index 3731a17..c04c9c3 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java
@@ -31,13 +31,13 @@
 import org.apache.solr.cloud.ZkSolrResourceLoader;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.common.util.TimeSource;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.rest.BaseSolrResource;
-import org.apache.solr.common.util.CommandOperation;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -189,6 +189,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.addFieldTypes(singletonList(fieldType), false);
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -223,6 +224,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.addCopyFields(src, dests, maxChars);
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -240,6 +242,7 @@
               = mgr.managedIndexSchema.addFields(singletonList(field), Collections.emptyMap(), false);
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -257,6 +260,7 @@
               = mgr.managedIndexSchema.addDynamicFields(singletonList(field), Collections.emptyMap(), false);
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -275,6 +279,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.deleteFieldTypes(singleton(name));
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -331,6 +336,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.deleteDynamicFields(singleton(name));
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -346,6 +352,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.replaceFieldType(name, className, op.getDataMap());
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
@@ -366,6 +373,7 @@
           mgr.managedIndexSchema = mgr.managedIndexSchema.replaceField(name, ft, op.getValuesExcluding(NAME, TYPE));
           return true;
         } catch (Exception e) {
+          log.error("err", e);
           op.addError(getErrorStr(e));
           return false;
         }
diff --git a/solr/core/src/java/org/apache/solr/security/JWTAuthPlugin.java b/solr/core/src/java/org/apache/solr/security/JWTAuthPlugin.java
index fb7b9db..8b9271a 100644
--- a/solr/core/src/java/org/apache/solr/security/JWTAuthPlugin.java
+++ b/solr/core/src/java/org/apache/solr/security/JWTAuthPlugin.java
@@ -71,7 +71,6 @@
 public class JWTAuthPlugin extends AuthenticationPlugin implements SpecProvider, ConfigEditablePlugin {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   private static final String PARAM_BLOCK_UNKNOWN = "blockUnknown";
-  private static final String PARAM_REQUIRE_SUBJECT = "requireSub";
   private static final String PARAM_REQUIRE_ISSUER = "requireIss";
   private static final String PARAM_PRINCIPAL_CLAIM = "principalClaim";
   private static final String PARAM_ROLES_CLAIM = "rolesClaim";
@@ -92,7 +91,7 @@
   static final String PRIMARY_ISSUER = "PRIMARY";
 
   private static final Set<String> PROPS = ImmutableSet.of(PARAM_BLOCK_UNKNOWN,
-      PARAM_REQUIRE_SUBJECT, PARAM_PRINCIPAL_CLAIM, PARAM_REQUIRE_EXPIRATIONTIME, PARAM_ALG_WHITELIST,
+      PARAM_PRINCIPAL_CLAIM, PARAM_REQUIRE_EXPIRATIONTIME, PARAM_ALG_WHITELIST,
       PARAM_JWK_CACHE_DURATION, PARAM_CLAIMS_MATCH, PARAM_SCOPE, PARAM_REALM, PARAM_ROLES_CLAIM,
       PARAM_ADMINUI_SCOPE, PARAM_REDIRECT_URIS, PARAM_REQUIRE_ISSUER, PARAM_ISSUERS,
       // These keys are supported for now to enable PRIMARY issuer config through top-level keys
@@ -137,10 +136,6 @@
     blockUnknown = Boolean.parseBoolean(String.valueOf(pluginConfig.getOrDefault(PARAM_BLOCK_UNKNOWN, false)));
     requireIssuer = Boolean.parseBoolean(String.valueOf(pluginConfig.getOrDefault(PARAM_REQUIRE_ISSUER, "true")));
     requireExpirationTime = Boolean.parseBoolean(String.valueOf(pluginConfig.getOrDefault(PARAM_REQUIRE_EXPIRATIONTIME, "true")));
-    if (pluginConfig.get(PARAM_REQUIRE_SUBJECT) != null) {
-      log.warn("Parameter {} is no longer used and may generate error in a later version. A subject claim is now always required",
-          PARAM_REQUIRE_SUBJECT);
-    }
     principalClaim = (String) pluginConfig.getOrDefault(PARAM_PRINCIPAL_CLAIM, "sub");
 
     rolesClaim = (String) pluginConfig.get(PARAM_ROLES_CLAIM);
@@ -500,7 +495,6 @@
     } else {
       jwtConsumerBuilder.setSkipDefaultAudienceValidation();
     }
-    jwtConsumerBuilder.setRequireSubject();
     if (requireExpirationTime)
       jwtConsumerBuilder.setRequireExpirationTime();
     if (algWhitelist != null)
diff --git a/solr/core/src/test-files/runtimecode/payload-component.jar.bin b/solr/core/src/test-files/runtimecode/payload-component.jar.bin
new file mode 100644
index 0000000..47a4305
--- /dev/null
+++ b/solr/core/src/test-files/runtimecode/payload-component.jar.bin
Binary files differ
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
index ff298aa..3890d85 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/TestCollectionAPIs.java
@@ -40,6 +40,7 @@
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.CoreContainer;
+import org.apache.solr.handler.ClusterAPI;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
@@ -84,6 +85,10 @@
       apiBag = new ApiBag(false);
       Collection<Api> apis = collectionsHandler.getApis();
       for (Api api : apis) apiBag.register(api, Collections.emptyMap());
+
+      ClusterAPI clusterAPI = new ClusterAPI(collectionsHandler);
+      apiBag.registerObject(clusterAPI);
+      apiBag.registerObject(clusterAPI.commands);
     }
     //test a simple create collection call
     compareOutput(apiBag, "/collections", POST,
@@ -278,6 +283,11 @@
     }
 
     @Override
+    protected CoreContainer checkErrors() {
+      return null;
+    }
+
+    @Override
     protected void copyFromClusterProp(Map<String, Object> props, String prop) {
 
     }
diff --git a/solr/core/src/test/org/apache/solr/pkg/TestPackages.java b/solr/core/src/test/org/apache/solr/pkg/TestPackages.java
index 8d8583c..9473d41 100644
--- a/solr/core/src/test/org/apache/solr/pkg/TestPackages.java
+++ b/solr/core/src/test/org/apache/solr/pkg/TestPackages.java
@@ -19,13 +19,20 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
 import java.util.concurrent.Callable;
 
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
-import org.apache.solr.client.solrj.*;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrResponse;
+import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
 import org.apache.solr.client.solrj.impl.HttpSolrClient;
@@ -69,9 +76,9 @@
 import static org.apache.solr.common.params.CommonParams.JAVABIN;
 import static org.apache.solr.common.params.CommonParams.WT;
 import static org.apache.solr.core.TestSolrConfigHandler.getFileContent;
+import static org.apache.solr.filestore.TestDistribPackageStore.checkAllNodesForFile;
 import static org.apache.solr.filestore.TestDistribPackageStore.readFile;
 import static org.apache.solr.filestore.TestDistribPackageStore.uploadKey;
-import static org.apache.solr.filestore.TestDistribPackageStore.checkAllNodesForFile;
 import static org.hamcrest.CoreMatchers.containsString;
 
 @LogLevel("org.apache.solr.pkg.PackageLoader=DEBUG;org.apache.solr.pkg.PackageAPI=DEBUG")
@@ -655,10 +662,14 @@
       postFileAndWait(cluster, "runtimecode/schema-plugins.jar.bin", FILE1,
               "iSRhrogDyt9P1htmSf/krh1kx9oty3TYyWm4GKHQGlb8a+X4tKCe9kKk+3tGs+bU9zq5JBZ5txNXsn96aZem5A==");
 
+      String FILE2 = "/schemapkg/payload-component.jar";
+      postFileAndWait(cluster, "runtimecode/payload-component.jar.bin", FILE2,
+          "gI6vYUDmSXSXmpNEeK1cwqrp4qTeVQgizGQkd8A4Prx2K8k7c5QlXbcs4lxFAAbbdXz9F4esBqTCiLMjVDHJ5Q==");
+
       Package.AddVersion add = new Package.AddVersion();
       add.version = "1.0";
       add.pkg = "schemapkg";
-      add.files = Arrays.asList(new String[]{FILE1});
+      add.files = Arrays.asList(FILE1,FILE2);
       V2Request req = new V2Request.Builder("/cluster/package")
               .forceV2(true)
               .withMethod(SolrRequest.METHOD.POST)
@@ -697,7 +708,7 @@
       String tokenizer =
               "        'tokenizer' : { 'class':'schemapkg:my.pkg.MyWhitespaceTokenizerFactory' },\n";
       String filters =
-              "        'filters' : [{ 'class':'solr.ASCIIFoldingFilterFactory' }]\n";
+          "        'filters' : [{ 'class':'solr.DelimitedPayloadTokenFilterFactory', 'encoder' : 'schemapkg:com.o19s.payloads.Base64Encoder'}]\n";
       String suffix = "    }\n" +
               "}}";
       cluster.getSolrClient().request(new SolrRequest(SolrRequest.METHOD.POST, "/schema") {
diff --git a/solr/core/src/test/org/apache/solr/security/JWTAuthPluginTest.java b/solr/core/src/test/org/apache/solr/security/JWTAuthPluginTest.java
index 7b04c95..9071341 100644
--- a/solr/core/src/test/org/apache/solr/security/JWTAuthPluginTest.java
+++ b/solr/core/src/test/org/apache/solr/security/JWTAuthPluginTest.java
@@ -93,6 +93,7 @@
     claims.unsetClaim("iss");
     claims.unsetClaim("aud");
     claims.unsetClaim("exp");
+    claims.setSubject(null);
     jws.setPayload(claims.toJson());
     String slimJwt = jws.getCompactSerialization();
     slimHeader = "Bearer" + " " + slimJwt;
@@ -127,6 +128,7 @@
 
     testConfig = new HashMap<>();
     testConfig.put("class", "org.apache.solr.security.JWTAuthPlugin");
+    testConfig.put("principalClaim", "customPrincipal");
     testConfig.put("jwk", testJwk);
     plugin.init(testConfig);
     
@@ -216,11 +218,25 @@
   public void authenticateOk() {
     JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
     assertTrue(resp.isAuthenticated());
-    assertEquals("solruser", resp.getPrincipal().getName());
+    assertEquals("custom", resp.getPrincipal().getName()); // principalClaim = customPrincipal, not sub here
   }
 
   @Test
   public void authFailedMissingSubject() {
+    minimalConfig.put("principalClaim","sub");  // minimalConfig has no subject specified
+    plugin.init(minimalConfig);
+    JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
+    assertFalse(resp.isAuthenticated());
+    assertEquals(JWTAuthPlugin.JWTAuthenticationResponse.AuthCode.JWT_VALIDATION_EXCEPTION, resp.getAuthCode());
+
+    testConfig.put("principalClaim","sub");  // testConfig has subject = solruser
+    plugin.init(testConfig);
+    resp = plugin.authenticate(testHeader);
+    assertTrue(resp.isAuthenticated());
+  }
+
+  @Test
+  public void authFailedMissingIssuer() {
     testConfig.put("iss", "NA");
     plugin.init(testConfig);
     JWTAuthPlugin.JWTAuthenticationResponse resp = plugin.authenticate(testHeader);
diff --git a/solr/example/files/README.md b/solr/example/files/README.md
deleted file mode 100644
index 04771be..0000000
--- a/solr/example/files/README.md
+++ /dev/null
@@ -1,167 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-# Solr-Powered File Search
-
-This README guides you through creating a Solr-powered search engine for your own set of files including Word documents,
-PDFs, HTML, and many other supported types. 
-
-For further explanations, see the frequently asked questions at the end of the guide.
-
-## GETTING STARTED
-
-* To start Solr, enter the following command (make sure you’ve cd’ed into the directory in which Solr was installed): 
-
-```
-    bin/solr start 
-```
-
-* If you’ve started correctly, you should see the following output:
-    
-```
-        Waiting to see Solr listening on port 8983 [/]  
-        Started Solr server on port 8983 (pid=<your pid>). Happy searching!
-```
-
-## CREATING THE CORE/COLLECTION
-
-* Before you can index your documents, you’ll need to create a core/collection. Do this by entering:
-
-```
-        bin/solr create -c files -d example/files/conf
-```
-
-* Now you’ve created a core called “files” using a configuration tuned for indexing and querying rich text files.
-
-* You should see the following response:
-
-```
-        Creating new core 'files' using command:
-        http://localhost:8983/solr/admin/cores?action=CREATE&name=files&instanceDir=files
-
-        {
-            "responseHeader":{
-                "status":0,
-                "QTime":239},
-            "core":"files"}
-```
-
-## INDEXING DOCUMENTS
-
-* Return to your command shell. To post all of your documents to the documents core, enter the following: 
-
-```
-        bin/post -c files ~/Documents
-```
-
-* Depending on how many documents you have, this could take a while. Sit back and watch the magic happen. When all of your documents have been indexed you’ll see something like:
-
-```
-        <some number> files indexed.
-        COMMITting Solr index changes to http://localhost:8983/solr/files/update...
-        Time spent: <some amount of time>
-```
-        
-* To see a list of accepted file types, do:
-
-```
-            bin/post -h
-```
-
-## BROWSING DOCUMENTS
-
-* Your document information can be viewed in multiple formats: XML, JSON, CSV, as well as a nice HTML interface. 
-
-* To view your document information in the HTML interface view, adjust the URL in your address bar to [http://localhost:8983/solr/files/browse](http://localhost:8983/solr/files/browse)
-
-* To view your document information in XML or other formats, add &wt (for writer type) to the end of that URL. i.e. To view your results in xml format direct your browser to:
-    [http://localhost:8983/solr/files/browse?&wt=xml](http://localhost:8983/solr/files/browse?&wt=xml)
-
-## ADMIN UI
-
-* Another way to verify that your core has been created is to view it in the Admin User Interface.
-
-    - The Admin_UI serves as a visual tool for indexing and querying your index in Solr.
-
-* To access the Admin UI, go to your browser and visit :
-    [http://localhost:8983/solr/](http://localhost:8983/solr/)
-
-    - <i>The Admin UI is only accessible when Solr is running</i>
-
-* On the left-hand side of the home page, click on “Core Selector”. The core you created, called “files” should be listed there; click on it. If it’s not listed, your core was not created and you’ll need to re-enter the create command.
-* Alternatively, you could just go to the core page directly by visiting : [http://localhost:8983/solr/#/files](http://localhost:8983/solr/#/files)
-
-* Now you’ve opened the core page. On this page there are a multitude of different tools you can use to analyze and search your core. You will make use of these features after indexing your documents.
-* Take note of the "Num Docs" field in your core Statistics. If after indexing your documents, it shows Num Docs to be 0, that means there was a problem indexing.
-
-## QUERYING INDEX
-
-* In the Admin UI, enter a term in the query box to see which documents contain the word. 
-
-* You can filter the results by switching between the different content type tabs. To view an international version of this interface, hover over the globe icon in the top right hand section of the page.
-
-* Notice the tag cloud on the right side, which facets by top phrases extracted during indexing.
-  Click on the phrases to see which documents contain them.
-
-* Another way to query the index is by manipulating the URL in your address bar once in the browse view.
-
-* i.e. : [http://localhost:8983/solr/files/browse?q=Lucene](http://localhost:8983/solr/files/browse?q=Lucene)
-
-## FAQs
-
-* Why use -d when creating a core?
-    * -d specifies a specific configuration to use.  This example as a configuration tuned for indexing and query rich
-      text files.
-    
-* How do I delete a core?
-  * To delete a core (i.e. files), you can enter the following in your command shell:
-    
-    ```
-    bin/solr delete -c files
-    ```
- 
-  * You should see the following output:
-    
-    Deleting core 'files' using command:
-        
-    ```
-    http://localhost:8983/solr/admin/cores?action=UNLOAD&core=files&deleteIndex=true&deleteDataDir=true&deleteInstanceDir=true
-    
-    {"responseHeader":{
-        "status":0,
-        "QTime":19}}
-    ```
-  
-  * This calls the Solr core admin handler, "UNLOAD", and the parameters "deleteDataDir" and "deleteInstanceDir" to ensure that all data associated with core is also removed
-
-* How can I change the /browse UI?
-
-    The primary templates are under example/files/conf/velocity.  **In order to edit those files in place (without having to
-    re-create or patch a core/collection with an updated configuration)**, Solr can be started with a special system property
-    set to the _absolute_ path to the conf/velocity directory, like this: 
-    
-    ```
-    bin/solr start -Dvelocity.template.base.dir=</full/path/to>/example/files/conf/velocity/
-    ```
- 
-If you want to adjust the browse templates for an existing collection, edit the core’s configuration
-under server/solr/files/conf/velocity.
-
-## Provenance of free images used in this example:
-
-  - Globe icon: visualpharm.com
-  - Flag icons: freeflagicons.com
\ No newline at end of file
diff --git a/solr/example/files/browse-resources/velocity/resources.properties b/solr/example/files/browse-resources/velocity/resources.properties
deleted file mode 100644
index 4cc15b2..0000000
--- a/solr/example/files/browse-resources/velocity/resources.properties
+++ /dev/null
@@ -1,82 +0,0 @@
-# Title: "<Solr logo> Powered File Search"
-powered_file_search=Powered File Search
-
-# Search box and results
-find=Find
-submit=Submit
-page_of=Page <span class="page-num">{0}</span> of <span class="page-count">{1}</span>
-previous=previous
-next=next
-results_found_in=results found in {0}ms
-results_found=results found
- 
-# Facets
-facet.top_phrases=Top Phrases
-facet.language=Language
- 
-# Type labels
-type.all=All Types
-type.doc.label=Document
-type.html.label=HTML
-type.pdf.label=PDF
-type.presentation.label=Presentation
-type.spreadsheet.label=Spreadsheet
-type.text.label=text
-type.image.label=image
-type.unknown=unknown
-
-# Language code mappings
-#   - from https://code.google.com/p/language-detection/wiki/LanguageList
-language.af=Afrikaans
-language.ar=Arabic
-language.bg=Bulgarian
-language.bn=Bengali
-language.cs=Czech
-language.da=Danish
-language.de=German
-language.el=Greek
-language.en=English
-language.es=Spanish
-language.et=Estonian
-language.fa=Persian
-language.fi=Finnish
-language.fr=French
-language.gu=Gujarati
-language.he=Hebrew
-language.hi=Hindi
-language.hr=Croatian
-language.hu=Hungarian
-language.id=Indonesian
-language.it=Italian
-language.ja=Japanese
-language.kn=Kannada
-language.ko=Korean
-language.lt=Lithuanian
-language.lv=Latvian
-language.mk=Macedonian
-language.ml=Malayalam
-language.mr=Marathi
-language.ne=Nepali
-language.nl=Dutch
-language.no=Norwegian
-language.pa=Punjabi
-language.pl=Polish
-language.pt=Portuguese
-language.ro=Romanian
-language.ru=Russian
-language.sk=Slovak
-language.sl=Slovene
-language.so=Somali
-language.sq=Albanian
-language.sv=Swedish
-language.sw=Swahili
-language.ta=Tamil
-language.te=Telugu
-language.th=Thai
-language.tl=Tagalog
-language.tr=Turkish
-language.uk=Ukrainian
-language.ur=Urdu
-language.vi=Vietnamese
-language.zh-cn=Simplified Chinese
-language.zh-tw=Traditional Chinese
diff --git a/solr/example/files/browse-resources/velocity/resources_de_DE.properties b/solr/example/files/browse-resources/velocity/resources_de_DE.properties
deleted file mode 100644
index 1837bf5..0000000
--- a/solr/example/files/browse-resources/velocity/resources_de_DE.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-find=Durchsuchen
-page_of=Page <span class="page-num">{0}</span> von <span class="page-count">{1}</span>
-previous=vorherige Seite
-next=n\u00e4chste Seite
-results_found_in=Ergebnisse in {0}ms gefunden
-results_found=Ergebnisse gefunden
-powered_file_search= betriebene Dateisuche
-type.text.label=Text
-type.pdf.label=PDF
-type.html.label=HTML
-type.presentation.label=Pr\u00e4sentation
-type.image.label=Bild
-type.doc.label=Dokument
-type.spreadsheet.label=Kalkulationstabelle
-type.unknown=unbekannt
-type.all=alle Arten
-facet.top_phrases=Schl\u00fcssels\u00e4tze
-submit=einreichen
diff --git a/solr/example/files/browse-resources/velocity/resources_fr_FR.properties b/solr/example/files/browse-resources/velocity/resources_fr_FR.properties
deleted file mode 100644
index 5b62757..0000000
--- a/solr/example/files/browse-resources/velocity/resources_fr_FR.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-find=Recherche
-page_of=Page <span class="page-num">{0}</span> de <span class="page-count">{1}</span>
-previous=pr\u00e9c\u00e9dent
-next=suivant
-results_found_in=resultas ficher en {0}ms
-results_found=resultas ficher
-powered_file_search=Recherches de Fichiers
-type.text.label=Texte
-type.pdf.label=PDF
-type.html.label=HTML
-type.image.label=Image
-type.presentation.label=Pr\u00e9sentation
-type.doc.label=Documents
-type.spreadsheet.label=Tableur
-type.unknown=Inconnu
-type.all=Tous les Types
-facet.top_phrases=Phrases Cl\u00e9s
-submit=Recherche
-
-
diff --git a/solr/example/files/conf/currency.xml b/solr/example/files/conf/currency.xml
deleted file mode 100644
index 3a9c58a..0000000
--- a/solr/example/files/conf/currency.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Example exchange rates file for CurrencyField type named "currency" in example schema -->
-
-<currencyConfig version="1.0">
-  <rates>
-    <!-- Updated from http://www.exchangerate.com/ at 2011-09-27 -->
-    <rate from="USD" to="ARS" rate="4.333871" comment="ARGENTINA Peso" />
-    <rate from="USD" to="AUD" rate="1.025768" comment="AUSTRALIA Dollar" />
-    <rate from="USD" to="EUR" rate="0.743676" comment="European Euro" />
-    <rate from="USD" to="BRL" rate="1.881093" comment="BRAZIL Real" />
-    <rate from="USD" to="CAD" rate="1.030815" comment="CANADA Dollar" />
-    <rate from="USD" to="CLP" rate="519.0996" comment="CHILE Peso" />
-    <rate from="USD" to="CNY" rate="6.387310" comment="CHINA Yuan" />
-    <rate from="USD" to="CZK" rate="18.47134" comment="CZECH REP. Koruna" />
-    <rate from="USD" to="DKK" rate="5.515436" comment="DENMARK Krone" />
-    <rate from="USD" to="HKD" rate="7.801922" comment="HONG KONG Dollar" />
-    <rate from="USD" to="HUF" rate="215.6169" comment="HUNGARY Forint" />
-    <rate from="USD" to="ISK" rate="118.1280" comment="ICELAND Krona" />
-    <rate from="USD" to="INR" rate="49.49088" comment="INDIA Rupee" />
-    <rate from="USD" to="XDR" rate="0.641358" comment="INTNL MON. FUND SDR" />
-    <rate from="USD" to="ILS" rate="3.709739" comment="ISRAEL Sheqel" />
-    <rate from="USD" to="JPY" rate="76.32419" comment="JAPAN Yen" />
-    <rate from="USD" to="KRW" rate="1169.173" comment="KOREA (SOUTH) Won" />
-    <rate from="USD" to="KWD" rate="0.275142" comment="KUWAIT Dinar" />
-    <rate from="USD" to="MXN" rate="13.85895" comment="MEXICO Peso" />
-    <rate from="USD" to="NZD" rate="1.285159" comment="NEW ZEALAND Dollar" />
-    <rate from="USD" to="NOK" rate="5.859035" comment="NORWAY Krone" />
-    <rate from="USD" to="PKR" rate="87.57007" comment="PAKISTAN Rupee" />
-    <rate from="USD" to="PEN" rate="2.730683" comment="PERU Sol" />
-    <rate from="USD" to="PHP" rate="43.62039" comment="PHILIPPINES Peso" />
-    <rate from="USD" to="PLN" rate="3.310139" comment="POLAND Zloty" />
-    <rate from="USD" to="RON" rate="3.100932" comment="ROMANIA Leu" />
-    <rate from="USD" to="RUB" rate="32.14663" comment="RUSSIA Ruble" />
-    <rate from="USD" to="SAR" rate="3.750465" comment="SAUDI ARABIA Riyal" />
-    <rate from="USD" to="SGD" rate="1.299352" comment="SINGAPORE Dollar" />
-    <rate from="USD" to="ZAR" rate="8.329761" comment="SOUTH AFRICA Rand" />
-    <rate from="USD" to="SEK" rate="6.883442" comment="SWEDEN Krona" />
-    <rate from="USD" to="CHF" rate="0.906035" comment="SWITZERLAND Franc" />
-    <rate from="USD" to="TWD" rate="30.40283" comment="TAIWAN Dollar" />
-    <rate from="USD" to="THB" rate="30.89487" comment="THAILAND Baht" />
-    <rate from="USD" to="AED" rate="3.672955" comment="U.A.E. Dirham" />
-    <rate from="USD" to="UAH" rate="7.988582" comment="UKRAINE Hryvnia" />
-    <rate from="USD" to="GBP" rate="0.647910" comment="UNITED KINGDOM Pound" />
-    
-    <!-- Cross-rates for some common currencies -->
-    <rate from="EUR" to="GBP" rate="0.869914" />  
-    <rate from="EUR" to="NOK" rate="7.800095" />  
-    <rate from="GBP" to="NOK" rate="8.966508" />  
-  </rates>
-</currencyConfig>
diff --git a/solr/example/files/conf/elevate.xml b/solr/example/files/conf/elevate.xml
deleted file mode 100644
index 2c09ebe..0000000
--- a/solr/example/files/conf/elevate.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- If this file is found in the config directory, it will only be
-     loaded once at startup.  If it is found in Solr's data
-     directory, it will be re-loaded every commit.
-
-   See http://wiki.apache.org/solr/QueryElevationComponent for more info
-
--->
-<elevate>
- <!-- Query elevation examples
-  <query text="foo bar">
-    <doc id="1" />
-    <doc id="2" />
-    <doc id="3" />
-  </query>
-
-for use with techproducts example
- 
-  <query text="ipod">
-    <doc id="MA147LL/A" />  put the actual ipod at the top 
-    <doc id="IW-02" exclude="true" /> exclude this cable
-  </query>
--->
-
-</elevate>
diff --git a/solr/example/files/conf/email_url_types.txt b/solr/example/files/conf/email_url_types.txt
deleted file mode 100644
index 622b193..0000000
--- a/solr/example/files/conf/email_url_types.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-<URL>
-<EMAIL>
diff --git a/solr/example/files/conf/lang/contractions_ca.txt b/solr/example/files/conf/lang/contractions_ca.txt
deleted file mode 100644
index 307a85f..0000000
--- a/solr/example/files/conf/lang/contractions_ca.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Set of Catalan contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-d
-l
-m
-n
-s
-t
diff --git a/solr/example/files/conf/lang/contractions_fr.txt b/solr/example/files/conf/lang/contractions_fr.txt
deleted file mode 100644
index f1bba51..0000000
--- a/solr/example/files/conf/lang/contractions_fr.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Set of French contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-l
-m
-t
-qu
-n
-s
-j
-d
-c
-jusqu
-quoiqu
-lorsqu
-puisqu
diff --git a/solr/example/files/conf/lang/contractions_ga.txt b/solr/example/files/conf/lang/contractions_ga.txt
deleted file mode 100644
index 9ebe7fa..0000000
--- a/solr/example/files/conf/lang/contractions_ga.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Set of Irish contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-d
-m
-b
diff --git a/solr/example/files/conf/lang/contractions_it.txt b/solr/example/files/conf/lang/contractions_it.txt
deleted file mode 100644
index cac0409..0000000
--- a/solr/example/files/conf/lang/contractions_it.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Set of Italian contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-c
-l 
-all 
-dall 
-dell 
-nell 
-sull 
-coll 
-pell 
-gl 
-agl 
-dagl 
-degl 
-negl 
-sugl 
-un 
-m 
-t 
-s 
-v 
-d
diff --git a/solr/example/files/conf/lang/hyphenations_ga.txt b/solr/example/files/conf/lang/hyphenations_ga.txt
deleted file mode 100644
index 4d2642c..0000000
--- a/solr/example/files/conf/lang/hyphenations_ga.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Set of Irish hyphenations for StopFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-h
-n
-t
diff --git a/solr/example/files/conf/lang/stemdict_nl.txt b/solr/example/files/conf/lang/stemdict_nl.txt
deleted file mode 100644
index 4410729..0000000
--- a/solr/example/files/conf/lang/stemdict_nl.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# Set of overrides for the dutch stemmer
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-fiets	fiets
-bromfiets	bromfiets
-ei	eier
-kind	kinder
diff --git a/solr/example/files/conf/lang/stoptags_ja.txt b/solr/example/files/conf/lang/stoptags_ja.txt
deleted file mode 100644
index 71b7508..0000000
--- a/solr/example/files/conf/lang/stoptags_ja.txt
+++ /dev/null
@@ -1,420 +0,0 @@
-#
-# This file defines a Japanese stoptag set for JapanesePartOfSpeechStopFilter.
-#
-# Any token with a part-of-speech tag that exactly matches those defined in this
-# file are removed from the token stream.
-#
-# Set your own stoptags by uncommenting the lines below.  Note that comments are
-# not allowed on the same line as a stoptag.  See LUCENE-3745 for frequency lists,
-# etc. that can be useful for building you own stoptag set.
-#
-# The entire possible tagset is provided below for convenience.
-#
-#####
-#  noun: unclassified nouns
-#名詞
-#
-#  noun-common: Common nouns or nouns where the sub-classification is undefined
-#名詞-一般
-#
-#  noun-proper: Proper nouns where the sub-classification is undefined 
-#名詞-固有名詞
-#
-#  noun-proper-misc: miscellaneous proper nouns
-#名詞-固有名詞-一般
-#
-#  noun-proper-person: Personal names where the sub-classification is undefined
-#名詞-固有名詞-人名
-#
-#  noun-proper-person-misc: names that cannot be divided into surname and 
-#  given name; foreign names; names where the surname or given name is unknown.
-#  e.g. お市の方
-#名詞-固有名詞-人名-一般
-#
-#  noun-proper-person-surname: Mainly Japanese surnames.
-#  e.g. 山田
-#名詞-固有名詞-人名-姓
-#
-#  noun-proper-person-given_name: Mainly Japanese given names.
-#  e.g. 太郎
-#名詞-固有名詞-人名-名
-#
-#  noun-proper-organization: Names representing organizations.
-#  e.g. 通産省, NHK
-#名詞-固有名詞-組織
-#
-#  noun-proper-place: Place names where the sub-classification is undefined
-#名詞-固有名詞-地域
-#
-#  noun-proper-place-misc: Place names excluding countries.
-#  e.g. アジア, バルセロナ, 京都
-#名詞-固有名詞-地域-一般
-#
-#  noun-proper-place-country: Country names. 
-#  e.g. 日本, オーストラリア
-#名詞-固有名詞-地域-国
-#
-#  noun-pronoun: Pronouns where the sub-classification is undefined
-#名詞-代名詞
-#
-#  noun-pronoun-misc: miscellaneous pronouns: 
-#  e.g. それ, ここ, あいつ, あなた, あちこち, いくつ, どこか, なに, みなさん, みんな, わたくし, われわれ
-#名詞-代名詞-一般
-#
-#  noun-pronoun-contraction: Spoken language contraction made by combining a 
-#  pronoun and the particle 'wa'.
-#  e.g. ありゃ, こりゃ, こりゃあ, そりゃ, そりゃあ 
-#名詞-代名詞-縮約
-#
-#  noun-adverbial: Temporal nouns such as names of days or months that behave 
-#  like adverbs. Nouns that represent amount or ratios and can be used adverbially,
-#  e.g. 金曜, 一月, 午後, 少量
-#名詞-副詞可能
-#
-#  noun-verbal: Nouns that take arguments with case and can appear followed by 
-#  'suru' and related verbs (する, できる, なさる, くださる)
-#  e.g. インプット, 愛着, 悪化, 悪戦苦闘, 一安心, 下取り
-#名詞-サ変接続
-#
-#  noun-adjective-base: The base form of adjectives, words that appear before な ("na")
-#  e.g. 健康, 安易, 駄目, だめ
-#名詞-形容動詞語幹
-#
-#  noun-numeric: Arabic numbers, Chinese numerals, and counters like 何 (回), 数.
-#  e.g. 0, 1, 2, 何, 数, 幾
-#名詞-数
-#
-#  noun-affix: noun affixes where the sub-classification is undefined
-#名詞-非自立
-#
-#  noun-affix-misc: Of adnominalizers, the case-marker の ("no"), and words that 
-#  attach to the base form of inflectional words, words that cannot be classified 
-#  into any of the other categories below. This category includes indefinite nouns.
-#  e.g. あかつき, 暁, かい, 甲斐, 気, きらい, 嫌い, くせ, 癖, こと, 事, ごと, 毎, しだい, 次第, 
-#       順, せい, 所為, ついで, 序で, つもり, 積もり, 点, どころ, の, はず, 筈, はずみ, 弾み, 
-#       拍子, ふう, ふり, 振り, ほう, 方, 旨, もの, 物, 者, ゆえ, 故, ゆえん, 所以, わけ, 訳,
-#       わり, 割り, 割, ん-口語/, もん-口語/
-#名詞-非自立-一般
-#
-#  noun-affix-adverbial: noun affixes that that can behave as adverbs.
-#  e.g. あいだ, 間, あげく, 挙げ句, あと, 後, 余り, 以外, 以降, 以後, 以上, 以前, 一方, うえ, 
-#       上, うち, 内, おり, 折り, かぎり, 限り, きり, っきり, 結果, ころ, 頃, さい, 際, 最中, さなか, 
-#       最中, じたい, 自体, たび, 度, ため, 為, つど, 都度, とおり, 通り, とき, 時, ところ, 所, 
-#       とたん, 途端, なか, 中, のち, 後, ばあい, 場合, 日, ぶん, 分, ほか, 他, まえ, 前, まま, 
-#       儘, 侭, みぎり, 矢先
-#名詞-非自立-副詞可能
-#
-#  noun-affix-aux: noun affixes treated as 助動詞 ("auxiliary verb") in school grammars 
-#  with the stem よう(だ) ("you(da)").
-#  e.g.  よう, やう, 様 (よう)
-#名詞-非自立-助動詞語幹
-#  
-#  noun-affix-adjective-base: noun affixes that can connect to the indeclinable
-#  connection form な (aux "da").
-#  e.g. みたい, ふう
-#名詞-非自立-形容動詞語幹
-#
-#  noun-special: special nouns where the sub-classification is undefined.
-#名詞-特殊
-#
-#  noun-special-aux: The そうだ ("souda") stem form that is used for reporting news, is 
-#  treated as 助動詞 ("auxiliary verb") in school grammars, and attach to the base 
-#  form of inflectional words.
-#  e.g. そう
-#名詞-特殊-助動詞語幹
-#
-#  noun-suffix: noun suffixes where the sub-classification is undefined.
-#名詞-接尾
-#
-#  noun-suffix-misc: Of the nouns or stem forms of other parts of speech that connect 
-#  to ガル or タイ and can combine into compound nouns, words that cannot be classified into
-#  any of the other categories below. In general, this category is more inclusive than 
-#  接尾語 ("suffix") and is usually the last element in a compound noun.
-#  e.g. おき, かた, 方, 甲斐 (がい), がかり, ぎみ, 気味, ぐるみ, (~した) さ, 次第, 済 (ず) み,
-#       よう, (でき)っこ, 感, 観, 性, 学, 類, 面, 用
-#名詞-接尾-一般
-#
-#  noun-suffix-person: Suffixes that form nouns and attach to person names more often
-#  than other nouns.
-#  e.g. 君, 様, 著
-#名詞-接尾-人名
-#
-#  noun-suffix-place: Suffixes that form nouns and attach to place names more often 
-#  than other nouns.
-#  e.g. 町, 市, 県
-#名詞-接尾-地域
-#
-#  noun-suffix-verbal: Of the suffixes that attach to nouns and form nouns, those that 
-#  can appear before スル ("suru").
-#  e.g. 化, 視, 分け, 入り, 落ち, 買い
-#名詞-接尾-サ変接続
-#
-#  noun-suffix-aux: The stem form of そうだ (様態) that is used to indicate conditions, 
-#  is treated as 助動詞 ("auxiliary verb") in school grammars, and attach to the 
-#  conjunctive form of inflectional words.
-#  e.g. そう
-#名詞-接尾-助動詞語幹
-#
-#  noun-suffix-adjective-base: Suffixes that attach to other nouns or the conjunctive 
-#  form of inflectional words and appear before the copula だ ("da").
-#  e.g. 的, げ, がち
-#名詞-接尾-形容動詞語幹
-#
-#  noun-suffix-adverbial: Suffixes that attach to other nouns and can behave as adverbs.
-#  e.g. 後 (ご), 以後, 以降, 以前, 前後, 中, 末, 上, 時 (じ)
-#名詞-接尾-副詞可能
-#
-#  noun-suffix-classifier: Suffixes that attach to numbers and form nouns. This category 
-#  is more inclusive than 助数詞 ("classifier") and includes common nouns that attach 
-#  to numbers.
-#  e.g. 個, つ, 本, 冊, パーセント, cm, kg, カ月, か国, 区画, 時間, 時半
-#名詞-接尾-助数詞
-#
-#  noun-suffix-special: Special suffixes that mainly attach to inflecting words.
-#  e.g. (楽し) さ, (考え) 方
-#名詞-接尾-特殊
-#
-#  noun-suffix-conjunctive: Nouns that behave like conjunctions and join two words 
-#  together.
-#  e.g. (日本) 対 (アメリカ), 対 (アメリカ), (3) 対 (5), (女優) 兼 (主婦)
-#名詞-接続詞的
-#
-#  noun-verbal_aux: Nouns that attach to the conjunctive particle て ("te") and are 
-#  semantically verb-like.
-#  e.g. ごらん, ご覧, 御覧, 頂戴
-#名詞-動詞非自立的
-#
-#  noun-quotation: text that cannot be segmented into words, proverbs, Chinese poetry, 
-#  dialects, English, etc. Currently, the only entry for 名詞 引用文字列 ("noun quotation") 
-#  is いわく ("iwaku").
-#名詞-引用文字列
-#
-#  noun-nai_adjective: Words that appear before the auxiliary verb ない ("nai") and
-#  behave like an adjective.
-#  e.g. 申し訳, 仕方, とんでも, 違い
-#名詞-ナイ形容詞語幹
-#
-#####
-#  prefix: unclassified prefixes
-#接頭詞
-#
-#  prefix-nominal: Prefixes that attach to nouns (including adjective stem forms) 
-#  excluding numerical expressions.
-#  e.g. お (水), 某 (氏), 同 (社), 故 (~氏), 高 (品質), お (見事), ご (立派)
-#接頭詞-名詞接続
-#
-#  prefix-verbal: Prefixes that attach to the imperative form of a verb or a verb
-#  in conjunctive form followed by なる/なさる/くださる.
-#  e.g. お (読みなさい), お (座り)
-#接頭詞-動詞接続
-#
-#  prefix-adjectival: Prefixes that attach to adjectives.
-#  e.g. お (寒いですねえ), バカ (でかい)
-#接頭詞-形容詞接続
-#
-#  prefix-numerical: Prefixes that attach to numerical expressions.
-#  e.g. 約, およそ, 毎時
-#接頭詞-数接続
-#
-#####
-#  verb: unclassified verbs
-#動詞
-#
-#  verb-main:
-#動詞-自立
-#
-#  verb-auxiliary:
-#動詞-非自立
-#
-#  verb-suffix:
-#動詞-接尾
-#
-#####
-#  adjective: unclassified adjectives
-#形容詞
-#
-#  adjective-main:
-#形容詞-自立
-#
-#  adjective-auxiliary:
-#形容詞-非自立
-#
-#  adjective-suffix:
-#形容詞-接尾
-#
-#####
-#  adverb: unclassified adverbs
-#副詞
-#
-#  adverb-misc: Words that can be segmented into one unit and where adnominal 
-#  modification is not possible.
-#  e.g. あいかわらず, 多分
-#副詞-一般
-#
-#  adverb-particle_conjunction: Adverbs that can be followed by の, は, に, 
-#  な, する, だ, etc.
-#  e.g. こんなに, そんなに, あんなに, なにか, なんでも
-#副詞-助詞類接続
-#
-#####
-#  adnominal: Words that only have noun-modifying forms.
-#  e.g. この, その, あの, どの, いわゆる, なんらかの, 何らかの, いろんな, こういう, そういう, ああいう, 
-#       どういう, こんな, そんな, あんな, どんな, 大きな, 小さな, おかしな, ほんの, たいした, 
-#       「(, も) さる (ことながら)」, 微々たる, 堂々たる, 単なる, いかなる, 我が」「同じ, 亡き
-#連体詞
-#
-#####
-#  conjunction: Conjunctions that can occur independently.
-#  e.g. が, けれども, そして, じゃあ, それどころか
-接続詞
-#
-#####
-#  particle: unclassified particles.
-助詞
-#
-#  particle-case: case particles where the subclassification is undefined.
-助詞-格助詞
-#
-#  particle-case-misc: Case particles.
-#  e.g. から, が, で, と, に, へ, より, を, の, にて
-助詞-格助詞-一般
-#
-#  particle-case-quote: the "to" that appears after nouns, a person’s speech, 
-#  quotation marks, expressions of decisions from a meeting, reasons, judgements,
-#  conjectures, etc.
-#  e.g. ( だ) と (述べた.), ( である) と (して執行猶予...)
-助詞-格助詞-引用
-#
-#  particle-case-compound: Compounds of particles and verbs that mainly behave 
-#  like case particles.
-#  e.g. という, といった, とかいう, として, とともに, と共に, でもって, にあたって, に当たって, に当って,
-#       にあたり, に当たり, に当り, に当たる, にあたる, において, に於いて,に於て, における, に於ける, 
-#       にかけ, にかけて, にかんし, に関し, にかんして, に関して, にかんする, に関する, に際し, 
-#       に際して, にしたがい, に従い, に従う, にしたがって, に従って, にたいし, に対し, にたいして, 
-#       に対して, にたいする, に対する, について, につき, につけ, につけて, につれ, につれて, にとって,
-#       にとり, にまつわる, によって, に依って, に因って, により, に依り, に因り, による, に依る, に因る, 
-#       にわたって, にわたる, をもって, を以って, を通じ, を通じて, を通して, をめぐって, をめぐり, をめぐる,
-#       って-口語/, ちゅう-関西弁「という」/, (何) ていう (人)-口語/, っていう-口語/, といふ, とかいふ
-助詞-格助詞-連語
-#
-#  particle-conjunctive:
-#  e.g. から, からには, が, けれど, けれども, けど, し, つつ, て, で, と, ところが, どころか, とも, ども, 
-#       ながら, なり, ので, のに, ば, ものの, や ( した), やいなや, (ころん) じゃ(いけない)-口語/, 
-#       (行っ) ちゃ(いけない)-口語/, (言っ) たって (しかたがない)-口語/, (それがなく)ったって (平気)-口語/
-助詞-接続助詞
-#
-#  particle-dependency:
-#  e.g. こそ, さえ, しか, すら, は, も, ぞ
-助詞-係助詞
-#
-#  particle-adverbial:
-#  e.g. がてら, かも, くらい, 位, ぐらい, しも, (学校) じゃ(これが流行っている)-口語/, 
-#       (それ)じゃあ (よくない)-口語/, ずつ, (私) なぞ, など, (私) なり (に), (先生) なんか (大嫌い)-口語/,
-#       (私) なんぞ, (先生) なんて (大嫌い)-口語/, のみ, だけ, (私) だって-口語/, だに, 
-#       (彼)ったら-口語/, (お茶) でも (いかが), 等 (とう), (今後) とも, ばかり, ばっか-口語/, ばっかり-口語/,
-#       ほど, 程, まで, 迄, (誰) も (が)([助詞-格助詞] および [助詞-係助詞] の前に位置する「も」)
-助詞-副助詞
-#
-#  particle-interjective: particles with interjective grammatical roles.
-#  e.g. (松島) や
-助詞-間投助詞
-#
-#  particle-coordinate:
-#  e.g. と, たり, だの, だり, とか, なり, や, やら
-助詞-並立助詞
-#
-#  particle-final:
-#  e.g. かい, かしら, さ, ぜ, (だ)っけ-口語/, (とまってる) で-方言/, な, ナ, なあ-口語/, ぞ, ね, ネ, 
-#       ねぇ-口語/, ねえ-口語/, ねん-方言/, の, のう-口語/, や, よ, ヨ, よぉ-口語/, わ, わい-口語/
-助詞-終助詞
-#
-#  particle-adverbial/conjunctive/final: The particle "ka" when unknown whether it is 
-#  adverbial, conjunctive, or sentence final. For example:
-#       (a) 「A か B か」. Ex:「(国内で運用する) か,(海外で運用する) か (.)」
-#       (b) Inside an adverb phrase. Ex:「(幸いという) か (, 死者はいなかった.)」
-#           「(祈りが届いたせい) か (, 試験に合格した.)」
-#       (c) 「かのように」. Ex:「(何もなかった) か (のように振る舞った.)」
-#  e.g. か
-助詞-副助詞/並立助詞/終助詞
-#
-#  particle-adnominalizer: The "no" that attaches to nouns and modifies 
-#  non-inflectional words.
-助詞-連体化
-#
-#  particle-adnominalizer: The "ni" and "to" that appear following nouns and adverbs 
-#  that are giongo, giseigo, or gitaigo.
-#  e.g. に, と
-助詞-副詞化
-#
-#  particle-special: A particle that does not fit into one of the above classifications. 
-#  This includes particles that are used in Tanka, Haiku, and other poetry.
-#  e.g. かな, けむ, ( しただろう) に, (あんた) にゃ(わからん), (俺) ん (家)
-助詞-特殊
-#
-#####
-#  auxiliary-verb:
-助動詞
-#
-#####
-#  interjection: Greetings and other exclamations.
-#  e.g. おはよう, おはようございます, こんにちは, こんばんは, ありがとう, どうもありがとう, ありがとうございます, 
-#       いただきます, ごちそうさま, さよなら, さようなら, はい, いいえ, ごめん, ごめんなさい
-#感動詞
-#
-#####
-#  symbol: unclassified Symbols.
-記号
-#
-#  symbol-misc: A general symbol not in one of the categories below.
-#  e.g. [○◎@$〒→+]
-記号-一般
-#
-#  symbol-comma: Commas
-#  e.g. [,、]
-記号-読点
-#
-#  symbol-period: Periods and full stops.
-#  e.g. [..。]
-記号-句点
-#
-#  symbol-space: Full-width whitespace.
-記号-空白
-#
-#  symbol-open_bracket:
-#  e.g. [({‘“『【]
-記号-括弧開
-#
-#  symbol-close_bracket:
-#  e.g. [)}’”』」】]
-記号-括弧閉
-#
-#  symbol-alphabetic:
-#記号-アルファベット
-#
-#####
-#  other: unclassified other
-#その他
-#
-#  other-interjection: Words that are hard to classify as noun-suffixes or 
-#  sentence-final particles.
-#  e.g. (だ)ァ
-その他-間投
-#
-#####
-#  filler: Aizuchi that occurs during a conversation or sounds inserted as filler.
-#  e.g. あの, うんと, えと
-フィラー
-#
-#####
-#  non-verbal: non-verbal sound.
-非言語音
-#
-#####
-#  fragment:
-#語断片
-#
-#####
-#  unknown: unknown part of speech.
-#未知語
-#
-##### End of file
diff --git a/solr/example/files/conf/lang/stopwords_ar.txt b/solr/example/files/conf/lang/stopwords_ar.txt
deleted file mode 100644
index 046829d..0000000
--- a/solr/example/files/conf/lang/stopwords_ar.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# Cleaned on October 11, 2009 (not normalized, so use before normalization)
-# This means that when modifying this list, you might need to add some 
-# redundant entries, for example containing forms with both أ and ا
-من
-ومن
-منها
-منه
-في
-وفي
-فيها
-فيه


-ثم
-او
-أو

-بها
-به


-اى
-اي
-أي
-أى
-لا
-ولا
-الا
-ألا
-إلا
-لكن
-ما
-وما
-كما
-فما
-عن
-مع
-اذا
-إذا
-ان
-أن
-إن
-انها
-أنها
-إنها
-انه
-أنه
-إنه
-بان
-بأن
-فان
-فأن
-وان
-وأن
-وإن
-التى
-التي
-الذى
-الذي
-الذين
-الى
-الي
-إلى
-إلي
-على
-عليها
-عليه
-اما
-أما
-إما
-ايضا
-أيضا
-كل
-وكل
-لم
-ولم
-لن
-ولن
-هى
-هي
-هو
-وهى
-وهي
-وهو
-فهى
-فهي
-فهو
-انت
-أنت
-لك
-لها
-له
-هذه
-هذا
-تلك
-ذلك
-هناك
-كانت
-كان
-يكون
-تكون
-وكانت
-وكان
-غير
-بعض
-قد
-نحو
-بين
-بينما
-منذ
-ضمن
-حيث
-الان
-الآن
-خلال
-بعد
-قبل
-حتى
-عند
-عندما
-لدى
-جميع
diff --git a/solr/example/files/conf/lang/stopwords_bg.txt b/solr/example/files/conf/lang/stopwords_bg.txt
deleted file mode 100644
index 1ae4ba2..0000000
--- a/solr/example/files/conf/lang/stopwords_bg.txt
+++ /dev/null
@@ -1,193 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html

-аз
-ако
-ала
-бе
-без
-беше
-би
-бил
-била
-били
-било
-близо
-бъдат
-бъде
-бяха

-вас
-ваш
-ваша
-вероятно
-вече
-взема
-ви
-вие
-винаги
-все
-всеки
-всички
-всичко
-всяка
-във
-въпреки
-върху

-ги
-главно
-го

-да
-дали
-до
-докато
-докога
-дори
-досега
-доста

-едва
-един
-ето
-за
-зад
-заедно
-заради
-засега
-затова
-защо
-защото

-из
-или
-им
-има
-имат
-иска

-каза
-как
-каква
-какво
-както
-какъв
-като
-кога
-когато
-което
-които
-кой
-който
-колко
-която
-къде
-където
-към
-ли

-ме
-между
-мен
-ми
-мнозина
-мога
-могат
-може
-моля
-момента
-му

-на
-над
-назад
-най
-направи
-напред
-например
-нас
-не
-него
-нея
-ни
-ние
-никой
-нито
-но
-някои
-някой
-няма
-обаче
-около
-освен
-особено
-от
-отгоре
-отново
-още
-пак
-по
-повече
-повечето
-под
-поне
-поради
-после
-почти
-прави
-пред
-преди
-през
-при
-пък
-първо

-са
-само
-се
-сега
-си
-скоро
-след
-сме
-според
-сред
-срещу
-сте
-съм
-със
-също

-тази
-така
-такива
-такъв
-там
-твой
-те
-тези
-ти
-тн
-то
-това
-тогава
-този
-той
-толкова
-точно
-трябва
-тук
-тъй
-тя
-тях

-харесва

-че
-често
-чрез
-ще
-щом

diff --git a/solr/example/files/conf/lang/stopwords_ca.txt b/solr/example/files/conf/lang/stopwords_ca.txt
deleted file mode 100644
index 3da65de..0000000
--- a/solr/example/files/conf/lang/stopwords_ca.txt
+++ /dev/null
@@ -1,220 +0,0 @@
-# Catalan stopwords from http://github.com/vcl/cue.language (Apache 2 Licensed)
-a
-abans
-ací
-ah
-així
-això
-al
-als
-aleshores
-algun
-alguna
-algunes
-alguns
-alhora
-allà
-allí
-allò
-altra
-altre
-altres
-amb
-ambdós
-ambdues
-apa
-aquell
-aquella
-aquelles
-aquells
-aquest
-aquesta
-aquestes
-aquests
-aquí
-baix
-cada
-cadascú
-cadascuna
-cadascunes
-cadascuns
-com
-contra
-d'un
-d'una
-d'unes
-d'uns
-dalt
-de
-del
-dels
-des
-després
-dins
-dintre
-donat
-doncs
-durant
-e
-eh
-el
-els
-em
-en
-encara
-ens
-entre
-érem
-eren
-éreu
-es
-és
-esta
-està
-estàvem
-estaven
-estàveu
-esteu
-et
-etc
-ets
-fins
-fora
-gairebé
-ha
-han
-has
-havia
-he
-hem
-heu
-hi 
-ho
-i
-igual
-iguals
-ja
-l'hi
-la
-les
-li
-li'n
-llavors
-m'he
-ma
-mal
-malgrat
-mateix
-mateixa
-mateixes
-mateixos
-me
-mentre
-més
-meu
-meus
-meva
-meves
-molt
-molta
-moltes
-molts
-mon
-mons
-n'he
-n'hi
-ne
-ni
-no
-nogensmenys
-només
-nosaltres
-nostra
-nostre
-nostres
-o
-oh
-oi
-on
-pas
-pel
-pels
-per
-però
-perquè
-poc 
-poca
-pocs
-poques
-potser
-propi
-qual
-quals
-quan
-quant 
-que
-què
-quelcom
-qui
-quin
-quina
-quines
-quins
-s'ha
-s'han
-sa
-semblant
-semblants
-ses
-seu 
-seus
-seva
-seva
-seves
-si
-sobre
-sobretot
-sóc
-solament
-sols
-son 
-són
-sons 
-sota
-sou
-t'ha
-t'han
-t'he
-ta
-tal
-també
-tampoc
-tan
-tant
-tanta
-tantes
-teu
-teus
-teva
-teves
-ton
-tons
-tot
-tota
-totes
-tots
-un
-una
-unes
-uns
-us
-va
-vaig
-vam
-van
-vas
-veu
-vosaltres
-vostra
-vostre
-vostres
diff --git a/solr/example/files/conf/lang/stopwords_cz.txt b/solr/example/files/conf/lang/stopwords_cz.txt
deleted file mode 100644
index 53c6097..0000000
--- a/solr/example/files/conf/lang/stopwords_cz.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-a
-s
-k
-o
-i
-u
-v
-z
-dnes
-cz
-tímto
-budeš
-budem
-byli
-jseš
-můj
-svým
-ta
-tomto
-tohle
-tuto
-tyto
-jej
-zda
-proč
-máte
-tato
-kam
-tohoto
-kdo
-kteří
-mi
-nám
-tom
-tomuto
-mít
-nic
-proto
-kterou
-byla
-toho
-protože
-asi
-ho
-naši
-napište
-re
-což
-tím
-takže
-svých
-její
-svými
-jste
-aj
-tu
-tedy
-teto
-bylo
-kde
-ke
-pravé
-ji
-nad
-nejsou
-či
-pod
-téma
-mezi
-přes
-ty
-pak
-vám
-ani
-když
-však
-neg
-jsem
-tento
-článku
-články
-aby
-jsme
-před
-pta
-jejich
-byl
-ještě
-až
-bez
-také
-pouze
-první
-vaše
-která
-nás
-nový
-tipy
-pokud
-může
-strana
-jeho
-své
-jiné
-zprávy
-nové
-není
-vás
-jen
-podle
-zde
-už
-být
-více
-bude
-již
-než
-který
-by
-které
-co
-nebo
-ten
-tak
-má
-při
-od
-po
-jsou
-jak
-další
-ale
-si
-se
-ve
-to
-jako
-za
-zpět
-ze
-do
-pro
-je
-na
-atd
-atp
-jakmile
-přičemž
-já
-on
-ona
-ono
-oni
-ony
-my
-vy
-jí
-ji
-mě
-mne
-jemu
-tomu
-těm
-těmu
-němu
-němuž
-jehož
-jíž
-jelikož
-jež
-jakož
-načež
diff --git a/solr/example/files/conf/lang/stopwords_da.txt b/solr/example/files/conf/lang/stopwords_da.txt
deleted file mode 100644
index 42e6145..0000000
--- a/solr/example/files/conf/lang/stopwords_da.txt
+++ /dev/null
@@ -1,110 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/danish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Danish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
-
-og           | and
-i            | in
-jeg          | I
-det          | that (dem. pronoun)/it (pers. pronoun)
-at           | that (in front of a sentence)/to (with infinitive)
-en           | a/an
-den          | it (pers. pronoun)/that (dem. pronoun)
-til          | to/at/for/until/against/by/of/into, more
-er           | present tense of "to be"
-som          | who, as
-på           | on/upon/in/on/at/to/after/of/with/for, on
-de           | they
-med          | with/by/in, along
-han          | he
-af           | of/by/from/off/for/in/with/on, off
-for          | at/for/to/from/by/of/ago, in front/before, because
-ikke         | not
-der          | who/which, there/those
-var          | past tense of "to be"
-mig          | me/myself
-sig          | oneself/himself/herself/itself/themselves
-men          | but
-et           | a/an/one, one (number), someone/somebody/one
-har          | present tense of "to have"
-om           | round/about/for/in/a, about/around/down, if
-vi           | we
-min          | my
-havde        | past tense of "to have"
-ham          | him
-hun          | she
-nu           | now
-over         | over/above/across/by/beyond/past/on/about, over/past
-da           | then, when/as/since
-fra          | from/off/since, off, since
-du           | you
-ud           | out
-sin          | his/her/its/one's
-dem          | them
-os           | us/ourselves
-op           | up
-man          | you/one
-hans         | his
-hvor         | where
-eller        | or
-hvad         | what
-skal         | must/shall etc.
-selv         | myself/youself/herself/ourselves etc., even
-her          | here
-alle         | all/everyone/everybody etc.
-vil          | will (verb)
-blev         | past tense of "to stay/to remain/to get/to become"
-kunne        | could
-ind          | in
-når          | when
-være         | present tense of "to be"
-dog          | however/yet/after all
-noget        | something
-ville        | would
-jo           | you know/you see (adv), yes
-deres        | their/theirs
-efter        | after/behind/according to/for/by/from, later/afterwards
-ned          | down
-skulle       | should
-denne        | this
-end          | than
-dette        | this
-mit          | my/mine
-også         | also
-under        | under/beneath/below/during, below/underneath
-have         | have
-dig          | you
-anden        | other
-hende        | her
-mine         | my
-alt          | everything
-meget        | much/very, plenty of
-sit          | his, her, its, one's
-sine         | his, her, its, one's
-vor          | our
-mod          | against
-disse        | these
-hvis         | if
-din          | your/yours
-nogle        | some
-hos          | by/at
-blive        | be/become
-mange        | many
-ad           | by/through
-bliver       | present tense of "to be/to become"
-hendes       | her/hers
-været        | be
-thi          | for (conj)
-jer          | you
-sådan        | such, like this/like that
diff --git a/solr/example/files/conf/lang/stopwords_de.txt b/solr/example/files/conf/lang/stopwords_de.txt
deleted file mode 100644
index 86525e7..0000000
--- a/solr/example/files/conf/lang/stopwords_de.txt
+++ /dev/null
@@ -1,294 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/german/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A German stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | The number of forms in this list is reduced significantly by passing it
- | through the German stemmer.
-
-
-aber           |  but
-
-alle           |  all
-allem
-allen
-aller
-alles
-
-als            |  than, as
-also           |  so
-am             |  an + dem
-an             |  at
-
-ander          |  other
-andere
-anderem
-anderen
-anderer
-anderes
-anderm
-andern
-anderr
-anders
-
-auch           |  also
-auf            |  on
-aus            |  out of
-bei            |  by
-bin            |  am
-bis            |  until
-bist           |  art
-da             |  there
-damit          |  with it
-dann           |  then
-
-der            |  the
-den
-des
-dem
-die
-das
-
-daß            |  that
-
-derselbe       |  the same
-derselben
-denselben
-desselben
-demselben
-dieselbe
-dieselben
-dasselbe
-
-dazu           |  to that
-
-dein           |  thy
-deine
-deinem
-deinen
-deiner
-deines
-
-denn           |  because
-
-derer          |  of those
-dessen         |  of him
-
-dich           |  thee
-dir            |  to thee
-du             |  thou
-
-dies           |  this
-diese
-diesem
-diesen
-dieser
-dieses
-
-
-doch           |  (several meanings)
-dort           |  (over) there
-
-
-durch          |  through
-
-ein            |  a
-eine
-einem
-einen
-einer
-eines
-
-einig          |  some
-einige
-einigem
-einigen
-einiger
-einiges
-
-einmal         |  once
-
-er             |  he
-ihn            |  him
-ihm            |  to him
-
-es             |  it
-etwas          |  something
-
-euer           |  your
-eure
-eurem
-euren
-eurer
-eures
-
-für            |  for
-gegen          |  towards
-gewesen        |  p.p. of sein
-hab            |  have
-habe           |  have
-haben          |  have
-hat            |  has
-hatte          |  had
-hatten         |  had
-hier           |  here
-hin            |  there
-hinter         |  behind
-
-ich            |  I
-mich           |  me
-mir            |  to me
-
-
-ihr            |  you, to her
-ihre
-ihrem
-ihren
-ihrer
-ihres
-euch           |  to you
-
-im             |  in + dem
-in             |  in
-indem          |  while
-ins            |  in + das
-ist            |  is
-
-jede           |  each, every
-jedem
-jeden
-jeder
-jedes
-
-jene           |  that
-jenem
-jenen
-jener
-jenes
-
-jetzt          |  now
-kann           |  can
-
-kein           |  no
-keine
-keinem
-keinen
-keiner
-keines
-
-können         |  can
-könnte         |  could
-machen         |  do
-man            |  one
-
-manche         |  some, many a
-manchem
-manchen
-mancher
-manches
-
-mein           |  my
-meine
-meinem
-meinen
-meiner
-meines
-
-mit            |  with
-muss           |  must
-musste         |  had to
-nach           |  to(wards)
-nicht          |  not
-nichts         |  nothing
-noch           |  still, yet
-nun            |  now
-nur            |  only
-ob             |  whether
-oder           |  or
-ohne           |  without
-sehr           |  very
-
-sein           |  his
-seine
-seinem
-seinen
-seiner
-seines
-
-selbst         |  self
-sich           |  herself
-
-sie            |  they, she
-ihnen          |  to them
-
-sind           |  are
-so             |  so
-
-solche         |  such
-solchem
-solchen
-solcher
-solches
-
-soll           |  shall
-sollte         |  should
-sondern        |  but
-sonst          |  else
-über           |  over
-um             |  about, around
-und            |  and
-
-uns            |  us
-unse
-unsem
-unsen
-unser
-unses
-
-unter          |  under
-viel           |  much
-vom            |  von + dem
-von            |  from
-vor            |  before
-während        |  while
-war            |  was
-waren          |  were
-warst          |  wast
-was            |  what
-weg            |  away, off
-weil           |  because
-weiter         |  further
-
-welche         |  which
-welchem
-welchen
-welcher
-welches
-
-wenn           |  when
-werde          |  will
-werden         |  will
-wie            |  how
-wieder         |  again
-will           |  want
-wir            |  we
-wird           |  will
-wirst          |  willst
-wo             |  where
-wollen         |  want
-wollte         |  wanted
-würde          |  would
-würden         |  would
-zu             |  to
-zum            |  zu + dem
-zur            |  zu + der
-zwar           |  indeed
-zwischen       |  between
-
diff --git a/solr/example/files/conf/lang/stopwords_el.txt b/solr/example/files/conf/lang/stopwords_el.txt
deleted file mode 100644
index 232681f..0000000
--- a/solr/example/files/conf/lang/stopwords_el.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-# Lucene Greek Stopwords list
-# Note: by default this file is used after GreekLowerCaseFilter,
-# so when modifying this file use 'σ' instead of 'ς' 
-ο

-το
-οι
-τα
-του
-τησ
-των
-τον
-την
-και 
-κι

-ειμαι
-εισαι
-ειναι
-ειμαστε
-ειστε
-στο
-στον
-στη
-στην
-μα
-αλλα
-απο
-για
-προσ
-με
-σε
-ωσ
-παρα
-αντι
-κατα
-μετα
-θα
-να
-δε
-δεν
-μη
-μην
-επι
-ενω
-εαν
-αν
-τοτε
-που
-πωσ
-ποιοσ
-ποια
-ποιο
-ποιοι
-ποιεσ
-ποιων
-ποιουσ
-αυτοσ
-αυτη
-αυτο
-αυτοι
-αυτων
-αυτουσ
-αυτεσ
-αυτα
-εκεινοσ
-εκεινη
-εκεινο
-εκεινοι
-εκεινεσ
-εκεινα
-εκεινων
-εκεινουσ
-οπωσ
-ομωσ
-ισωσ
-οσο
-οτι
diff --git a/solr/example/files/conf/lang/stopwords_en.txt b/solr/example/files/conf/lang/stopwords_en.txt
deleted file mode 100644
index 2c164c0..0000000
--- a/solr/example/files/conf/lang/stopwords_en.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# a couple of test stopwords to test that the words are really being
-# configured from this file:
-stopworda
-stopwordb
-
-# Standard english stop words taken from Lucene's StopAnalyzer
-a
-an
-and
-are
-as
-at
-be
-but
-by
-for
-if
-in
-into
-is
-it
-no
-not
-of
-on
-or
-such
-that
-the
-their
-then
-there
-these
-they
-this
-to
-was
-will
-with
diff --git a/solr/example/files/conf/lang/stopwords_es.txt b/solr/example/files/conf/lang/stopwords_es.txt
deleted file mode 100644
index 487d78c..0000000
--- a/solr/example/files/conf/lang/stopwords_es.txt
+++ /dev/null
@@ -1,356 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/spanish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Spanish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-
- | The following is a ranked list (commonest to rarest) of stopwords
- | deriving from a large sample of text.
-
- | Extra words have been added at the end.
-
-de             |  from, of
-la             |  the, her
-que            |  who, that
-el             |  the
-en             |  in
-y              |  and
-a              |  to
-los            |  the, them
-del            |  de + el
-se             |  himself, from him etc
-las            |  the, them
-por            |  for, by, etc
-un             |  a
-para           |  for
-con            |  with
-no             |  no
-una            |  a
-su             |  his, her
-al             |  a + el
-  | es         from SER
-lo             |  him
-como           |  how
-más            |  more
-pero           |  pero
-sus            |  su plural
-le             |  to him, her
-ya             |  already
-o              |  or
-  | fue        from SER
-este           |  this
-  | ha         from HABER
-sí             |  himself etc
-porque         |  because
-esta           |  this
-  | son        from SER
-entre          |  between
-  | está     from ESTAR
-cuando         |  when
-muy            |  very
-sin            |  without
-sobre          |  on
-  | ser        from SER
-  | tiene      from TENER
-también        |  also
-me             |  me
-hasta          |  until
-hay            |  there is/are
-donde          |  where
-  | han        from HABER
-quien          |  whom, that
-  | están      from ESTAR
-  | estado     from ESTAR
-desde          |  from
-todo           |  all
-nos            |  us
-durante        |  during
-  | estados    from ESTAR
-todos          |  all
-uno            |  a
-les            |  to them
-ni             |  nor
-contra         |  against
-otros          |  other
-  | fueron     from SER
-ese            |  that
-eso            |  that
-  | había      from HABER
-ante           |  before
-ellos          |  they
-e              |  and (variant of y)
-esto           |  this
-mí             |  me
-antes          |  before
-algunos        |  some
-qué            |  what?
-unos           |  a
-yo             |  I
-otro           |  other
-otras          |  other
-otra           |  other
-él             |  he
-tanto          |  so much, many
-esa            |  that
-estos          |  these
-mucho          |  much, many
-quienes        |  who
-nada           |  nothing
-muchos         |  many
-cual           |  who
-  | sea        from SER
-poco           |  few
-ella           |  she
-estar          |  to be
-  | haber      from HABER
-estas          |  these
-  | estaba     from ESTAR
-  | estamos    from ESTAR
-algunas        |  some
-algo           |  something
-nosotros       |  we
-
-      | other forms
-
-mi             |  me
-mis            |  mi plural
-tú             |  thou
-te             |  thee
-ti             |  thee
-tu             |  thy
-tus            |  tu plural
-ellas          |  they
-nosotras       |  we
-vosotros       |  you
-vosotras       |  you
-os             |  you
-mío            |  mine
-mía            |
-míos           |
-mías           |
-tuyo           |  thine
-tuya           |
-tuyos          |
-tuyas          |
-suyo           |  his, hers, theirs
-suya           |
-suyos          |
-suyas          |
-nuestro        |  ours
-nuestra        |
-nuestros       |
-nuestras       |
-vuestro        |  yours
-vuestra        |
-vuestros       |
-vuestras       |
-esos           |  those
-esas           |  those
-
-               | forms of estar, to be (not including the infinitive):
-estoy
-estás
-está
-estamos
-estáis
-están
-esté
-estés
-estemos
-estéis
-estén
-estaré
-estarás
-estará
-estaremos
-estaréis
-estarán
-estaría
-estarías
-estaríamos
-estaríais
-estarían
-estaba
-estabas
-estábamos
-estabais
-estaban
-estuve
-estuviste
-estuvo
-estuvimos
-estuvisteis
-estuvieron
-estuviera
-estuvieras
-estuviéramos
-estuvierais
-estuvieran
-estuviese
-estuvieses
-estuviésemos
-estuvieseis
-estuviesen
-estando
-estado
-estada
-estados
-estadas
-estad
-
-               | forms of haber, to have (not including the infinitive):
-he
-has
-ha
-hemos
-habéis
-han
-haya
-hayas
-hayamos
-hayáis
-hayan
-habré
-habrás
-habrá
-habremos
-habréis
-habrán
-habría
-habrías
-habríamos
-habríais
-habrían
-había
-habías
-habíamos
-habíais
-habían
-hube
-hubiste
-hubo
-hubimos
-hubisteis
-hubieron
-hubiera
-hubieras
-hubiéramos
-hubierais
-hubieran
-hubiese
-hubieses
-hubiésemos
-hubieseis
-hubiesen
-habiendo
-habido
-habida
-habidos
-habidas
-
-               | forms of ser, to be (not including the infinitive):
-soy
-eres
-es
-somos
-sois
-son
-sea
-seas
-seamos
-seáis
-sean
-seré
-serás
-será
-seremos
-seréis
-serán
-sería
-serías
-seríamos
-seríais
-serían
-era
-eras
-éramos
-erais
-eran
-fui
-fuiste
-fue
-fuimos
-fuisteis
-fueron
-fuera
-fueras
-fuéramos
-fuerais
-fueran
-fuese
-fueses
-fuésemos
-fueseis
-fuesen
-siendo
-sido
-  |  sed also means 'thirst'
-
-               | forms of tener, to have (not including the infinitive):
-tengo
-tienes
-tiene
-tenemos
-tenéis
-tienen
-tenga
-tengas
-tengamos
-tengáis
-tengan
-tendré
-tendrás
-tendrá
-tendremos
-tendréis
-tendrán
-tendría
-tendrías
-tendríamos
-tendríais
-tendrían
-tenía
-tenías
-teníamos
-teníais
-tenían
-tuve
-tuviste
-tuvo
-tuvimos
-tuvisteis
-tuvieron
-tuviera
-tuvieras
-tuviéramos
-tuvierais
-tuvieran
-tuviese
-tuvieses
-tuviésemos
-tuvieseis
-tuviesen
-teniendo
-tenido
-tenida
-tenidos
-tenidas
-tened
-
diff --git a/solr/example/files/conf/lang/stopwords_eu.txt b/solr/example/files/conf/lang/stopwords_eu.txt
deleted file mode 100644
index 25f1db9..0000000
--- a/solr/example/files/conf/lang/stopwords_eu.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-# example set of basque stopwords
-al
-anitz
-arabera
-asko
-baina
-bat
-batean
-batek
-bati
-batzuei
-batzuek
-batzuetan
-batzuk
-bera
-beraiek
-berau
-berauek
-bere
-berori
-beroriek
-beste
-bezala
-da
-dago
-dira
-ditu
-du
-dute
-edo
-egin
-ere
-eta
-eurak
-ez
-gainera
-gu
-gutxi
-guzti
-haiei
-haiek
-haietan
-hainbeste
-hala
-han
-handik
-hango
-hara
-hari
-hark
-hartan
-hau
-hauei
-hauek
-hauetan
-hemen
-hemendik
-hemengo
-hi
-hona
-honek
-honela
-honetan
-honi
-hor
-hori
-horiei
-horiek
-horietan
-horko
-horra
-horrek
-horrela
-horretan
-horri
-hortik
-hura
-izan
-ni
-noiz
-nola
-non
-nondik
-nongo
-nor
-nora
-ze
-zein
-zen
-zenbait
-zenbat
-zer
-zergatik
-ziren
-zituen
-zu
-zuek
-zuen
-zuten
diff --git a/solr/example/files/conf/lang/stopwords_fa.txt b/solr/example/files/conf/lang/stopwords_fa.txt
deleted file mode 100644
index 723641c..0000000
--- a/solr/example/files/conf/lang/stopwords_fa.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# Note: by default this file is used after normalization, so when adding entries
-# to this file, use the arabic 'ي' instead of 'ی'
-انان
-نداشته
-سراسر
-خياه
-ايشان
-وي
-تاكنون
-بيشتري
-دوم
-پس
-ناشي
-وگو
-يا
-داشتند
-سپس
-هنگام
-هرگز
-پنج
-نشان
-امسال
-ديگر
-گروهي
-شدند
-چطور
-ده

-دو
-نخستين
-ولي
-چرا
-چه
-وسط

-كدام
-قابل
-يك
-رفت
-هفت
-همچنين
-در
-هزار
-بله
-بلي
-شايد
-اما
-شناسي
-گرفته
-دهد
-داشته
-دانست
-داشتن
-خواهيم
-ميليارد
-وقتيكه
-امد
-خواهد
-جز
-اورده
-شده
-بلكه
-خدمات
-شدن
-برخي
-نبود
-بسياري
-جلوگيري
-حق
-كردند
-نوعي
-بعري
-نكرده
-نظير
-نبايد
-بوده
-بودن
-داد
-اورد
-هست
-جايي
-شود
-دنبال
-داده
-بايد
-سابق
-هيچ
-همان
-انجا
-كمتر
-كجاست
-گردد
-كسي
-تر
-مردم
-تان
-دادن
-بودند
-سري
-جدا
-ندارند
-مگر
-يكديگر
-دارد
-دهند
-بنابراين
-هنگامي
-سمت
-جا
-انچه
-خود
-دادند
-زياد
-دارند
-اثر
-بدون
-بهترين
-بيشتر
-البته
-به
-براساس
-بيرون
-كرد
-بعضي
-گرفت
-توي
-اي
-ميليون
-او
-جريان
-تول
-بر
-مانند
-برابر
-باشيم
-مدتي
-گويند
-اكنون
-تا
-تنها
-جديد
-چند
-بي
-نشده
-كردن
-كردم
-گويد
-كرده
-كنيم
-نمي
-نزد
-روي
-قصد
-فقط
-بالاي
-ديگران
-اين
-ديروز
-توسط
-سوم
-ايم
-دانند
-سوي
-استفاده
-شما
-كنار
-داريم
-ساخته
-طور
-امده
-رفته
-نخست
-بيست
-نزديك
-طي
-كنيد
-از
-انها
-تمامي
-داشت
-يكي
-طريق
-اش
-چيست
-روب
-نمايد
-گفت
-چندين
-چيزي
-تواند
-ام
-ايا
-با
-ان
-ايد
-ترين
-اينكه
-ديگري
-راه
-هايي
-بروز
-همچنان
-پاعين
-كس
-حدود
-مختلف
-مقابل
-چيز
-گيرد
-ندارد
-ضد
-همچون
-سازي
-شان
-مورد
-باره
-مرسي
-خويش
-برخوردار
-چون
-خارج
-شش
-هنوز
-تحت
-ضمن
-هستيم
-گفته
-فكر
-بسيار
-پيش
-براي
-روزهاي
-انكه
-نخواهد
-بالا
-كل
-وقتي
-كي
-چنين
-كه
-گيري
-نيست
-است
-كجا
-كند
-نيز
-يابد
-بندي
-حتي
-توانند
-عقب
-خواست
-كنند
-بين
-تمام
-همه
-ما
-باشند
-مثل
-شد
-اري
-باشد
-اره
-طبق
-بعد
-اگر
-صورت
-غير
-جاي
-بيش
-ريزي
-اند
-زيرا
-چگونه
-بار
-لطفا
-مي
-درباره
-من
-ديده
-همين
-گذاري
-برداري
-علت
-گذاشته
-هم
-فوق
-نه
-ها
-شوند
-اباد
-همواره
-هر
-اول
-خواهند
-چهار
-نام
-امروز
-مان
-هاي
-قبل
-كنم
-سعي
-تازه
-را
-هستند
-زير
-جلوي
-عنوان
-بود
diff --git a/solr/example/files/conf/lang/stopwords_fi.txt b/solr/example/files/conf/lang/stopwords_fi.txt
deleted file mode 100644
index 4372c9a..0000000
--- a/solr/example/files/conf/lang/stopwords_fi.txt
+++ /dev/null
@@ -1,97 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/finnish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
- 
-| forms of BE
-
-olla
-olen
-olet
-on
-olemme
-olette
-ovat
-ole        | negative form
-
-oli
-olisi
-olisit
-olisin
-olisimme
-olisitte
-olisivat
-olit
-olin
-olimme
-olitte
-olivat
-ollut
-olleet
-
-en         | negation
-et
-ei
-emme
-ette
-eivät
-
-|Nom   Gen    Acc    Part   Iness   Elat    Illat  Adess   Ablat   Allat   Ess    Trans
-minä   minun  minut  minua  minussa minusta minuun minulla minulta minulle               | I
-sinä   sinun  sinut  sinua  sinussa sinusta sinuun sinulla sinulta sinulle               | you
-hän    hänen  hänet  häntä  hänessä hänestä häneen hänellä häneltä hänelle               | he she
-me     meidän meidät meitä  meissä  meistä  meihin meillä  meiltä  meille                | we
-te     teidän teidät teitä  teissä  teistä  teihin teillä  teiltä  teille                | you
-he     heidän heidät heitä  heissä  heistä  heihin heillä  heiltä  heille                | they
-
-tämä   tämän         tätä   tässä   tästä   tähän  tallä   tältä   tälle   tänä   täksi  | this
-tuo    tuon          tuotä  tuossa  tuosta  tuohon tuolla  tuolta  tuolle  tuona  tuoksi | that
-se     sen           sitä   siinä   siitä   siihen sillä   siltä   sille   sinä   siksi  | it
-nämä   näiden        näitä  näissä  näistä  näihin näillä  näiltä  näille  näinä  näiksi | these
-nuo    noiden        noita  noissa  noista  noihin noilla  noilta  noille  noina  noiksi | those
-ne     niiden        niitä  niissä  niistä  niihin niillä  niiltä  niille  niinä  niiksi | they
-
-kuka   kenen kenet   ketä   kenessä kenestä keneen kenellä keneltä kenelle kenenä keneksi| who
-ketkä  keiden ketkä  keitä  keissä  keistä  keihin keillä  keiltä  keille  keinä  keiksi | (pl)
-mikä   minkä minkä   mitä   missä   mistä   mihin  millä   miltä   mille   minä   miksi  | which what
-mitkä                                                                                    | (pl)
-
-joka   jonka         jota   jossa   josta   johon  jolla   jolta   jolle   jona   joksi  | who which
-jotka  joiden        joita  joissa  joista  joihin joilla  joilta  joille  joina  joiksi | (pl)
-
-| conjunctions
-
-että   | that
-ja     | and
-jos    | if
-koska  | because
-kuin   | than
-mutta  | but
-niin   | so
-sekä   | and
-sillä  | for
-tai    | or
-vaan   | but
-vai    | or
-vaikka | although
-
-
-| prepositions
-
-kanssa  | with
-mukaan  | according to
-noin    | about
-poikki  | across
-yli     | over, across
-
-| other
-
-kun    | when
-niin   | so
-nyt    | now
-itse   | self
-
diff --git a/solr/example/files/conf/lang/stopwords_fr.txt b/solr/example/files/conf/lang/stopwords_fr.txt
deleted file mode 100644
index 749abae..0000000
--- a/solr/example/files/conf/lang/stopwords_fr.txt
+++ /dev/null
@@ -1,186 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/french/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A French stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-au             |  a + le
-aux            |  a + les
-avec           |  with
-ce             |  this
-ces            |  these
-dans           |  with
-de             |  of
-des            |  de + les
-du             |  de + le
-elle           |  she
-en             |  `of them' etc
-et             |  and
-eux            |  them
-il             |  he
-je             |  I
-la             |  the
-le             |  the
-leur           |  their
-lui            |  him
-ma             |  my (fem)
-mais           |  but
-me             |  me
-même           |  same; as in moi-même (myself) etc
-mes            |  me (pl)
-moi            |  me
-mon            |  my (masc)
-ne             |  not
-nos            |  our (pl)
-notre          |  our
-nous           |  we
-on             |  one
-ou             |  where
-par            |  by
-pas            |  not
-pour           |  for
-qu             |  que before vowel
-que            |  that
-qui            |  who
-sa             |  his, her (fem)
-se             |  oneself
-ses            |  his (pl)
-son            |  his, her (masc)
-sur            |  on
-ta             |  thy (fem)
-te             |  thee
-tes            |  thy (pl)
-toi            |  thee
-ton            |  thy (masc)
-tu             |  thou
-un             |  a
-une            |  a
-vos            |  your (pl)
-votre          |  your
-vous           |  you
-
-               |  single letter forms
-
-c              |  c'
-d              |  d'
-j              |  j'
-l              |  l'
-à              |  to, at
-m              |  m'
-n              |  n'
-s              |  s'
-t              |  t'
-y              |  there
-
-               | forms of être (not including the infinitive):
-été
-étée
-étées
-étés
-étant
-suis
-es
-est
-sommes
-êtes
-sont
-serai
-seras
-sera
-serons
-serez
-seront
-serais
-serait
-serions
-seriez
-seraient
-étais
-était
-étions
-étiez
-étaient
-fus
-fut
-fûmes
-fûtes
-furent
-sois
-soit
-soyons
-soyez
-soient
-fusse
-fusses
-fût
-fussions
-fussiez
-fussent
-
-               | forms of avoir (not including the infinitive):
-ayant
-eu
-eue
-eues
-eus
-ai
-as
-avons
-avez
-ont
-aurai
-auras
-aura
-aurons
-aurez
-auront
-aurais
-aurait
-aurions
-auriez
-auraient
-avais
-avait
-avions
-aviez
-avaient
-eut
-eûmes
-eûtes
-eurent
-aie
-aies
-ait
-ayons
-ayez
-aient
-eusse
-eusses
-eût
-eussions
-eussiez
-eussent
-
-               | Later additions (from Jean-Christophe Deschamps)
-ceci           |  this
-cela           |  that
-celà           |  that
-cet            |  this
-cette          |  this
-ici            |  here
-ils            |  they
-les            |  the (pl)
-leurs          |  their (pl)
-quel           |  which
-quels          |  which
-quelle         |  which
-quelles        |  which
-sans           |  without
-soi            |  oneself
-
diff --git a/solr/example/files/conf/lang/stopwords_ga.txt b/solr/example/files/conf/lang/stopwords_ga.txt
deleted file mode 100644
index 9ff88d7..0000000
--- a/solr/example/files/conf/lang/stopwords_ga.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-
-a
-ach
-ag
-agus
-an
-aon
-ar
-arna
-as
-b'
-ba
-beirt
-bhúr
-caoga
-ceathair
-ceathrar
-chomh
-chtó
-chuig
-chun
-cois
-céad
-cúig
-cúigear
-d'
-daichead
-dar
-de
-deich
-deichniúr
-den
-dhá
-do
-don
-dtí
-dá
-dár
-dó
-faoi
-faoin
-faoina
-faoinár
-fara
-fiche
-gach
-gan
-go
-gur
-haon
-hocht
-i
-iad
-idir
-in
-ina
-ins
-inár
-is
-le
-leis
-lena
-lenár
-m'
-mar
-mo
-mé
-na
-nach
-naoi
-naonúr
-ná
-ní
-níor
-nó
-nócha
-ocht
-ochtar
-os
-roimh
-sa
-seacht
-seachtar
-seachtó
-seasca
-seisear
-siad
-sibh
-sinn
-sna
-sé
-sí
-tar
-thar
-thú
-triúr
-trí
-trína
-trínár
-tríocha
-tú
-um
-ár

-éis


-ón
-óna
-ónár
diff --git a/solr/example/files/conf/lang/stopwords_gl.txt b/solr/example/files/conf/lang/stopwords_gl.txt
deleted file mode 100644
index d8760b1..0000000
--- a/solr/example/files/conf/lang/stopwords_gl.txt
+++ /dev/null
@@ -1,161 +0,0 @@
-# galican stopwords
-a
-aínda
-alí
-aquel
-aquela
-aquelas
-aqueles
-aquilo
-aquí
-ao
-aos
-as
-así

-ben
-cando
-che
-co
-coa
-comigo
-con
-connosco
-contigo
-convosco
-coas
-cos
-cun
-cuns
-cunha
-cunhas
-da
-dalgunha
-dalgunhas
-dalgún
-dalgúns
-das
-de
-del
-dela
-delas
-deles
-desde
-deste
-do
-dos
-dun
-duns
-dunha
-dunhas
-e
-el
-ela
-elas
-eles
-en
-era
-eran
-esa
-esas
-ese
-eses
-esta
-estar
-estaba
-está
-están
-este
-estes
-estiven
-estou
-eu

-facer
-foi
-foron
-fun
-había
-hai
-iso
-isto
-la
-las
-lle
-lles
-lo
-los
-mais
-me
-meu
-meus
-min
-miña
-miñas
-moi
-na
-nas
-neste
-nin
-no
-non
-nos
-nosa
-nosas
-noso
-nosos
-nós
-nun
-nunha
-nuns
-nunhas
-o
-os
-ou

-ós
-para
-pero
-pode
-pois
-pola
-polas
-polo
-polos
-por
-que
-se
-senón
-ser
-seu
-seus
-sexa
-sido
-sobre
-súa
-súas
-tamén
-tan
-te
-ten
-teñen
-teño
-ter
-teu
-teus
-ti
-tido
-tiña
-tiven
-túa
-túas
-un
-unha
-unhas
-uns
-vos
-vosa
-vosas
-voso
-vosos
-vós
diff --git a/solr/example/files/conf/lang/stopwords_hi.txt b/solr/example/files/conf/lang/stopwords_hi.txt
deleted file mode 100644
index 86286bb..0000000
--- a/solr/example/files/conf/lang/stopwords_hi.txt
+++ /dev/null
@@ -1,235 +0,0 @@
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# Note: by default this file also contains forms normalized by HindiNormalizer 
-# for spelling variation (see section below), such that it can be used whether or 
-# not you enable that feature. When adding additional entries to this list,
-# please add the normalized form as well. 
-अंदर
-अत
-अपना
-अपनी
-अपने
-अभी
-आदि
-आप
-इत्यादि
-इन 
-इनका
-इन्हीं
-इन्हें
-इन्हों
-इस
-इसका
-इसकी
-इसके
-इसमें
-इसी
-इसे
-उन
-उनका
-उनकी
-उनके
-उनको
-उन्हीं
-उन्हें
-उन्हों
-उस
-उसके
-उसी
-उसे
-एक
-एवं
-एस
-ऐसे
-और
-कई
-कर
-करता
-करते
-करना
-करने
-करें
-कहते
-कहा
-का
-काफ़ी
-कि
-कितना
-किन्हें
-किन्हों
-किया
-किर
-किस
-किसी
-किसे
-की
-कुछ
-कुल
-के
-को
-कोई
-कौन
-कौनसा
-गया
-घर
-जब
-जहाँ
-जा
-जितना
-जिन
-जिन्हें
-जिन्हों
-जिस
-जिसे
-जीधर
-जैसा
-जैसे
-जो
-तक
-तब
-तरह
-तिन
-तिन्हें
-तिन्हों
-तिस
-तिसे
-तो
-था
-थी
-थे
-दबारा
-दिया
-दुसरा
-दूसरे
-दो
-द्वारा
-न
-नहीं
-ना
-निहायत
-नीचे
-ने
-पर
-पर  
-पहले
-पूरा
-पे
-फिर
-बनी
-बही
-बहुत
-बाद
-बाला
-बिलकुल
-भी
-भीतर
-मगर
-मानो
-मे
-में
-यदि
-यह
-यहाँ
-यही
-या
-यिह 
-ये
-रखें
-रहा
-रहे
-ऱ्वासा
-लिए
-लिये
-लेकिन
-व
-वर्ग
-वह
-वह 
-वहाँ
-वहीं
-वाले
-वुह 
-वे
-वग़ैरह
-संग
-सकता
-सकते
-सबसे
-सभी
-साथ
-साबुत
-साभ
-सारा
-से
-सो
-ही
-हुआ
-हुई
-हुए
-है
-हैं
-हो
-होता
-होती
-होते
-होना
-होने
-# additional normalized forms of the above
-अपनि
-जेसे
-होति
-सभि
-तिंहों
-इंहों
-दवारा
-इसि
-किंहें
-थि
-उंहों
-ओर
-जिंहें
-वहिं
-अभि
-बनि
-हि
-उंहिं
-उंहें
-हें
-वगेरह
-एसे
-रवासा
-कोन
-निचे
-काफि
-उसि
-पुरा
-भितर
-हे
-बहि
-वहां
-कोइ
-यहां
-जिंहों
-तिंहें
-किसि
-कइ
-यहि
-इंहिं
-जिधर
-इंहें
-अदि
-इतयादि
-हुइ
-कोनसा
-इसकि
-दुसरे
-जहां
-अप
-किंहों
-उनकि
-भि
-वरग
-हुअ
-जेसा
-नहिं
diff --git a/solr/example/files/conf/lang/stopwords_hu.txt b/solr/example/files/conf/lang/stopwords_hu.txt
deleted file mode 100644
index 37526da..0000000
--- a/solr/example/files/conf/lang/stopwords_hu.txt
+++ /dev/null
@@ -1,211 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/hungarian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
- 
-| Hungarian stop word list
-| prepared by Anna Tordai
-
-a
-ahogy
-ahol
-aki
-akik
-akkor
-alatt
-által
-általában
-amely
-amelyek
-amelyekben
-amelyeket
-amelyet
-amelynek
-ami
-amit
-amolyan
-amíg
-amikor
-át
-abban
-ahhoz
-annak
-arra
-arról
-az
-azok
-azon
-azt
-azzal
-azért
-aztán
-azután
-azonban
-bár
-be
-belül
-benne
-cikk
-cikkek
-cikkeket
-csak
-de
-e
-eddig
-egész
-egy
-egyes
-egyetlen
-egyéb
-egyik
-egyre
-ekkor
-el
-elég
-ellen
-elő
-először
-előtt
-első
-én
-éppen
-ebben
-ehhez
-emilyen
-ennek
-erre
-ez
-ezt
-ezek
-ezen
-ezzel
-ezért
-és
-fel
-felé
-hanem
-hiszen
-hogy
-hogyan
-igen
-így
-illetve
-ill.
-ill
-ilyen
-ilyenkor
-ison
-ismét
-itt
-jó
-jól
-jobban
-kell
-kellett
-keresztül
-keressünk
-ki
-kívül
-között
-közül
-legalább
-lehet
-lehetett
-legyen
-lenne
-lenni
-lesz
-lett
-maga
-magát
-majd
-majd
-már
-más
-másik
-meg
-még
-mellett
-mert
-mely
-melyek
-mi
-mit
-míg
-miért
-milyen
-mikor
-minden
-mindent
-mindenki
-mindig
-mint
-mintha
-mivel
-most
-nagy
-nagyobb
-nagyon
-ne
-néha
-nekem
-neki
-nem
-néhány
-nélkül
-nincs
-olyan
-ott
-össze

-ők
-őket
-pedig
-persze
-rá
-s
-saját
-sem
-semmi
-sok
-sokat
-sokkal
-számára
-szemben
-szerint
-szinte
-talán
-tehát
-teljes
-tovább
-továbbá
-több
-úgy
-ugyanis
-új
-újabb
-újra
-után
-utána
-utolsó
-vagy
-vagyis
-valaki
-valami
-valamint
-való
-vagyok
-van
-vannak
-volt
-voltam
-voltak
-voltunk
-vissza
-vele
-viszont
-volna
diff --git a/solr/example/files/conf/lang/stopwords_hy.txt b/solr/example/files/conf/lang/stopwords_hy.txt
deleted file mode 100644
index 60c1c50..0000000
--- a/solr/example/files/conf/lang/stopwords_hy.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-# example set of Armenian stopwords.
-այդ
-այլ
-այն
-այս
-դու
-դուք
-եմ
-են
-ենք
-ես
-եք

-էի
-էին
-էինք
-էիր
-էիք
-էր
-ըստ


-ին
-իսկ
-իր
-կամ
-համար
-հետ
-հետո
-մենք
-մեջ
-մի

-նա
-նաև
-նրա
-նրանք
-որ
-որը
-որոնք
-որպես
-ու
-ում
-պիտի
-վրա

diff --git a/solr/example/files/conf/lang/stopwords_id.txt b/solr/example/files/conf/lang/stopwords_id.txt
deleted file mode 100644
index 4617f83..0000000
--- a/solr/example/files/conf/lang/stopwords_id.txt
+++ /dev/null
@@ -1,359 +0,0 @@
-# from appendix D of: A Study of Stemming Effects on Information
-# Retrieval in Bahasa Indonesia
-ada
-adanya
-adalah
-adapun
-agak
-agaknya
-agar
-akan
-akankah
-akhirnya
-aku
-akulah
-amat
-amatlah
-anda
-andalah
-antar
-diantaranya
-antara
-antaranya
-diantara
-apa
-apaan
-mengapa
-apabila
-apakah
-apalagi
-apatah
-atau
-ataukah
-ataupun
-bagai
-bagaikan
-sebagai
-sebagainya
-bagaimana
-bagaimanapun
-sebagaimana
-bagaimanakah
-bagi
-bahkan
-bahwa
-bahwasanya
-sebaliknya
-banyak
-sebanyak
-beberapa
-seberapa
-begini
-beginian
-beginikah
-beginilah
-sebegini
-begitu
-begitukah
-begitulah
-begitupun
-sebegitu
-belum
-belumlah
-sebelum
-sebelumnya
-sebenarnya
-berapa
-berapakah
-berapalah
-berapapun
-betulkah
-sebetulnya
-biasa
-biasanya
-bila
-bilakah
-bisa
-bisakah
-sebisanya
-boleh
-bolehkah
-bolehlah
-buat
-bukan
-bukankah
-bukanlah
-bukannya
-cuma
-percuma
-dahulu
-dalam
-dan
-dapat
-dari
-daripada
-dekat
-demi
-demikian
-demikianlah
-sedemikian
-dengan
-depan
-di
-dia
-dialah
-dini
-diri
-dirinya
-terdiri
-dong
-dulu
-enggak
-enggaknya
-entah
-entahlah
-terhadap
-terhadapnya
-hal
-hampir
-hanya
-hanyalah
-harus
-haruslah
-harusnya
-seharusnya
-hendak
-hendaklah
-hendaknya
-hingga
-sehingga
-ia
-ialah
-ibarat
-ingin
-inginkah
-inginkan
-ini
-inikah
-inilah
-itu
-itukah
-itulah
-jangan
-jangankan
-janganlah
-jika
-jikalau
-juga
-justru
-kala
-kalau
-kalaulah
-kalaupun
-kalian
-kami
-kamilah
-kamu
-kamulah
-kan
-kapan
-kapankah
-kapanpun
-dikarenakan
-karena
-karenanya
-ke
-kecil
-kemudian
-kenapa
-kepada
-kepadanya
-ketika
-seketika
-khususnya
-kini
-kinilah
-kiranya
-sekiranya
-kita
-kitalah
-kok
-lagi
-lagian
-selagi
-lah
-lain
-lainnya
-melainkan
-selaku
-lalu
-melalui
-terlalu
-lama
-lamanya
-selama
-selama
-selamanya
-lebih
-terlebih
-bermacam
-macam
-semacam
-maka
-makanya
-makin
-malah
-malahan
-mampu
-mampukah
-mana
-manakala
-manalagi
-masih
-masihkah
-semasih
-masing
-mau
-maupun
-semaunya
-memang
-mereka
-merekalah
-meski
-meskipun
-semula
-mungkin
-mungkinkah
-nah
-namun
-nanti
-nantinya
-nyaris
-oleh
-olehnya
-seorang
-seseorang
-pada
-padanya
-padahal
-paling
-sepanjang
-pantas
-sepantasnya
-sepantasnyalah
-para
-pasti
-pastilah
-per
-pernah
-pula
-pun
-merupakan
-rupanya
-serupa
-saat
-saatnya
-sesaat
-saja
-sajalah
-saling
-bersama
-sama
-sesama
-sambil
-sampai
-sana
-sangat
-sangatlah
-saya
-sayalah
-se
-sebab
-sebabnya
-sebuah
-tersebut
-tersebutlah
-sedang
-sedangkan
-sedikit
-sedikitnya
-segala
-segalanya
-segera
-sesegera
-sejak
-sejenak
-sekali
-sekalian
-sekalipun
-sesekali
-sekaligus
-sekarang
-sekarang
-sekitar
-sekitarnya
-sela
-selain
-selalu
-seluruh
-seluruhnya
-semakin
-sementara
-sempat
-semua
-semuanya
-sendiri
-sendirinya
-seolah
-seperti
-sepertinya
-sering
-seringnya
-serta
-siapa
-siapakah
-siapapun
-disini
-disinilah
-sini
-sinilah
-sesuatu
-sesuatunya
-suatu
-sesudah
-sesudahnya
-sudah
-sudahkah
-sudahlah
-supaya
-tadi
-tadinya
-tak
-tanpa
-setelah
-telah
-tentang
-tentu
-tentulah
-tentunya
-tertentu
-seterusnya
-tapi
-tetapi
-setiap
-tiap
-setidaknya
-tidak
-tidakkah
-tidaklah
-toh
-waduh
-wah
-wahai
-sewaktu
-walau
-walaupun
-wong
-yaitu
-yakni
-yang
diff --git a/solr/example/files/conf/lang/stopwords_it.txt b/solr/example/files/conf/lang/stopwords_it.txt
deleted file mode 100644
index 1219cc7..0000000
--- a/solr/example/files/conf/lang/stopwords_it.txt
+++ /dev/null
@@ -1,303 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/italian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | An Italian stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-ad             |  a (to) before vowel
-al             |  a + il
-allo           |  a + lo
-ai             |  a + i
-agli           |  a + gli
-all            |  a + l'
-agl            |  a + gl'
-alla           |  a + la
-alle           |  a + le
-con            |  with
-col            |  con + il
-coi            |  con + i (forms collo, cogli etc are now very rare)
-da             |  from
-dal            |  da + il
-dallo          |  da + lo
-dai            |  da + i
-dagli          |  da + gli
-dall           |  da + l'
-dagl           |  da + gll'
-dalla          |  da + la
-dalle          |  da + le
-di             |  of
-del            |  di + il
-dello          |  di + lo
-dei            |  di + i
-degli          |  di + gli
-dell           |  di + l'
-degl           |  di + gl'
-della          |  di + la
-delle          |  di + le
-in             |  in
-nel            |  in + el
-nello          |  in + lo
-nei            |  in + i
-negli          |  in + gli
-nell           |  in + l'
-negl           |  in + gl'
-nella          |  in + la
-nelle          |  in + le
-su             |  on
-sul            |  su + il
-sullo          |  su + lo
-sui            |  su + i
-sugli          |  su + gli
-sull           |  su + l'
-sugl           |  su + gl'
-sulla          |  su + la
-sulle          |  su + le
-per            |  through, by
-tra            |  among
-contro         |  against
-io             |  I
-tu             |  thou
-lui            |  he
-lei            |  she
-noi            |  we
-voi            |  you
-loro           |  they
-mio            |  my
-mia            |
-miei           |
-mie            |
-tuo            |
-tua            |
-tuoi           |  thy
-tue            |
-suo            |
-sua            |
-suoi           |  his, her
-sue            |
-nostro         |  our
-nostra         |
-nostri         |
-nostre         |
-vostro         |  your
-vostra         |
-vostri         |
-vostre         |
-mi             |  me
-ti             |  thee
-ci             |  us, there
-vi             |  you, there
-lo             |  him, the
-la             |  her, the
-li             |  them
-le             |  them, the
-gli            |  to him, the
-ne             |  from there etc
-il             |  the
-un             |  a
-uno            |  a
-una            |  a
-ma             |  but
-ed             |  and
-se             |  if
-perché         |  why, because
-anche          |  also
-come           |  how
-dov            |  where (as dov')
-dove           |  where
-che            |  who, that
-chi            |  who
-cui            |  whom
-non            |  not
-più            |  more
-quale          |  who, that
-quanto         |  how much
-quanti         |
-quanta         |
-quante         |
-quello         |  that
-quelli         |
-quella         |
-quelle         |
-questo         |  this
-questi         |
-questa         |
-queste         |
-si             |  yes
-tutto          |  all
-tutti          |  all
-
-               |  single letter forms:
-
-a              |  at
-c              |  as c' for ce or ci
-e              |  and
-i              |  the
-l              |  as l'
-o              |  or
-
-               | forms of avere, to have (not including the infinitive):
-
-ho
-hai
-ha
-abbiamo
-avete
-hanno
-abbia
-abbiate
-abbiano
-avrò
-avrai
-avrà
-avremo
-avrete
-avranno
-avrei
-avresti
-avrebbe
-avremmo
-avreste
-avrebbero
-avevo
-avevi
-aveva
-avevamo
-avevate
-avevano
-ebbi
-avesti
-ebbe
-avemmo
-aveste
-ebbero
-avessi
-avesse
-avessimo
-avessero
-avendo
-avuto
-avuta
-avuti
-avute
-
-               | forms of essere, to be (not including the infinitive):
-sono
-sei

-siamo
-siete
-sia
-siate
-siano
-sarò
-sarai
-sarà
-saremo
-sarete
-saranno
-sarei
-saresti
-sarebbe
-saremmo
-sareste
-sarebbero
-ero
-eri
-era
-eravamo
-eravate
-erano
-fui
-fosti
-fu
-fummo
-foste
-furono
-fossi
-fosse
-fossimo
-fossero
-essendo
-
-               | forms of fare, to do (not including the infinitive, fa, fat-):
-faccio
-fai
-facciamo
-fanno
-faccia
-facciate
-facciano
-farò
-farai
-farà
-faremo
-farete
-faranno
-farei
-faresti
-farebbe
-faremmo
-fareste
-farebbero
-facevo
-facevi
-faceva
-facevamo
-facevate
-facevano
-feci
-facesti
-fece
-facemmo
-faceste
-fecero
-facessi
-facesse
-facessimo
-facessero
-facendo
-
-               | forms of stare, to be (not including the infinitive):
-sto
-stai
-sta
-stiamo
-stanno
-stia
-stiate
-stiano
-starò
-starai
-starà
-staremo
-starete
-staranno
-starei
-staresti
-starebbe
-staremmo
-stareste
-starebbero
-stavo
-stavi
-stava
-stavamo
-stavate
-stavano
-stetti
-stesti
-stette
-stemmo
-steste
-stettero
-stessi
-stesse
-stessimo
-stessero
-stando
diff --git a/solr/example/files/conf/lang/stopwords_ja.txt b/solr/example/files/conf/lang/stopwords_ja.txt
deleted file mode 100644
index d4321be..0000000
--- a/solr/example/files/conf/lang/stopwords_ja.txt
+++ /dev/null
@@ -1,127 +0,0 @@
-#
-# This file defines a stopword set for Japanese.
-#
-# This set is made up of hand-picked frequent terms from segmented Japanese Wikipedia.
-# Punctuation characters and frequent kanji have mostly been left out.  See LUCENE-3745
-# for frequency lists, etc. that can be useful for making your own set (if desired)
-#
-# Note that there is an overlap between these stopwords and the terms stopped when used
-# in combination with the JapanesePartOfSpeechStopFilter.  When editing this file, note
-# that comments are not allowed on the same line as stopwords.
-#
-# Also note that stopping is done in a case-insensitive manner.  Change your StopFilter
-# configuration if you need case-sensitive stopping.  Lastly, note that stopping is done
-# using the same character width as the entries in this file.  Since this StopFilter is
-# normally done after a CJKWidthFilter in your chain, you would usually want your romaji
-# entries to be in half-width and your kana entries to be in full-width.
-#
-の
-に
-は
-を
-た
-が
-で
-て
-と
-し
-れ
-さ
-ある
-いる
-も
-する
-から
-な
-こと
-として
-い
-や
-れる
-など
-なっ
-ない
-この
-ため
-その
-あっ
-よう
-また
-もの
-という
-あり
-まで
-られ
-なる
-へ
-か
-だ
-これ
-によって
-により
-おり
-より
-による
-ず
-なり
-られる
-において
-ば
-なかっ
-なく
-しかし
-について
-せ
-だっ
-その後
-できる
-それ
-う
-ので
-なお
-のみ
-でき
-き
-つ
-における
-および
-いう
-さらに
-でも
-ら
-たり
-その他
-に関する
-たち
-ます
-ん
-なら
-に対して
-特に
-せる
-及び
-これら
-とき
-では
-にて
-ほか
-ながら
-うち
-そして
-とともに
-ただし
-かつて
-それぞれ
-または
-お
-ほど
-ものの
-に対する
-ほとんど
-と共に
-といった
-です
-とも
-ところ
-ここ
-##### End of file
diff --git a/solr/example/files/conf/lang/stopwords_lv.txt b/solr/example/files/conf/lang/stopwords_lv.txt
deleted file mode 100644
index e21a23c..0000000
--- a/solr/example/files/conf/lang/stopwords_lv.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-# Set of Latvian stopwords from A Stemming Algorithm for Latvian, Karlis Kreslins
-# the original list of over 800 forms was refined: 
-#   pronouns, adverbs, interjections were removed
-# 
-# prepositions
-aiz
-ap
-ar
-apakš
-ārpus
-augšpus
-bez
-caur
-dēļ
-gar
-iekš
-iz
-kopš
-labad
-lejpus
-līdz
-no
-otrpus
-pa
-par
-pār
-pēc
-pie
-pirms
-pret
-priekš
-starp
-šaipus
-uz
-viņpus
-virs
-virspus
-zem
-apakšpus
-# Conjunctions
-un
-bet
-jo
-ja
-ka
-lai
-tomēr
-tikko
-turpretī
-arī
-kaut
-gan
-tādēļ
-tā
-ne
-tikvien
-vien
-kā
-ir
-te
-vai
-kamēr
-# Particles
-ar
-diezin
-droši
-diemžēl
-nebūt
-ik
-it
-taču
-nu
-pat
-tiklab
-iekšpus
-nedz
-tik
-nevis
-turpretim
-jeb
-iekam
-iekām
-iekāms
-kolīdz
-līdzko
-tiklīdz
-jebšu
-tālab
-tāpēc
-nekā
-itin
-jā
-jau
-jel
-nē
-nezin
-tad
-tikai
-vis
-tak
-iekams
-vien
-# modal verbs
-būt  
-biju 
-biji
-bija
-bijām
-bijāt
-esmu
-esi
-esam
-esat 
-būšu     
-būsi
-būs
-būsim
-būsiet
-tikt
-tiku
-tiki
-tika
-tikām
-tikāt
-tieku
-tiec
-tiek
-tiekam
-tiekat
-tikšu
-tiks
-tiksim
-tiksiet
-tapt
-tapi
-tapāt
-topat
-tapšu
-tapsi
-taps
-tapsim
-tapsiet
-kļūt
-kļuvu
-kļuvi
-kļuva
-kļuvām
-kļuvāt
-kļūstu
-kļūsti
-kļūst
-kļūstam
-kļūstat
-kļūšu
-kļūsi
-kļūs
-kļūsim
-kļūsiet
-# verbs
-varēt
-varēju
-varējām
-varēšu
-varēsim
-var
-varēji
-varējāt
-varēsi
-varēsiet
-varat
-varēja
-varēs
diff --git a/solr/example/files/conf/lang/stopwords_nl.txt b/solr/example/files/conf/lang/stopwords_nl.txt
deleted file mode 100644
index 47a2aea..0000000
--- a/solr/example/files/conf/lang/stopwords_nl.txt
+++ /dev/null
@@ -1,119 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/dutch/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Dutch stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large sample of Dutch text.
-
- | Dutch stop words frequently exhibit homonym clashes. These are indicated
- | clearly below.
-
-de             |  the
-en             |  and
-van            |  of, from
-ik             |  I, the ego
-te             |  (1) chez, at etc, (2) to, (3) too
-dat            |  that, which
-die            |  that, those, who, which
-in             |  in, inside
-een            |  a, an, one
-hij            |  he
-het            |  the, it
-niet           |  not, nothing, naught
-zijn           |  (1) to be, being, (2) his, one's, its
-is             |  is
-was            |  (1) was, past tense of all persons sing. of 'zijn' (to be) (2) wax, (3) the washing, (4) rise of river
-op             |  on, upon, at, in, up, used up
-aan            |  on, upon, to (as dative)
-met            |  with, by
-als            |  like, such as, when
-voor           |  (1) before, in front of, (2) furrow
-had            |  had, past tense all persons sing. of 'hebben' (have)
-er             |  there
-maar           |  but, only
-om             |  round, about, for etc
-hem            |  him
-dan            |  then
-zou            |  should/would, past tense all persons sing. of 'zullen'
-of             |  or, whether, if
-wat            |  what, something, anything
-mijn           |  possessive and noun 'mine'
-men            |  people, 'one'
-dit            |  this
-zo             |  so, thus, in this way
-door           |  through by
-over           |  over, across
-ze             |  she, her, they, them
-zich           |  oneself
-bij            |  (1) a bee, (2) by, near, at
-ook            |  also, too
-tot            |  till, until
-je             |  you
-mij            |  me
-uit            |  out of, from
-der            |  Old Dutch form of 'van der' still found in surnames
-daar           |  (1) there, (2) because
-haar           |  (1) her, their, them, (2) hair
-naar           |  (1) unpleasant, unwell etc, (2) towards, (3) as
-heb            |  present first person sing. of 'to have'
-hoe            |  how, why
-heeft          |  present third person sing. of 'to have'
-hebben         |  'to have' and various parts thereof
-deze           |  this
-u              |  you
-want           |  (1) for, (2) mitten, (3) rigging
-nog            |  yet, still
-zal            |  'shall', first and third person sing. of verb 'zullen' (will)
-me             |  me
-zij            |  she, they
-nu             |  now
-ge             |  'thou', still used in Belgium and south Netherlands
-geen           |  none
-omdat          |  because
-iets           |  something, somewhat
-worden         |  to become, grow, get
-toch           |  yet, still
-al             |  all, every, each
-waren          |  (1) 'were' (2) to wander, (3) wares, (3)
-veel           |  much, many
-meer           |  (1) more, (2) lake
-doen           |  to do, to make
-toen           |  then, when
-moet           |  noun 'spot/mote' and present form of 'to must'
-ben            |  (1) am, (2) 'are' in interrogative second person singular of 'to be'
-zonder         |  without
-kan            |  noun 'can' and present form of 'to be able'
-hun            |  their, them
-dus            |  so, consequently
-alles          |  all, everything, anything
-onder          |  under, beneath
-ja             |  yes, of course
-eens           |  once, one day
-hier           |  here
-wie            |  who
-werd           |  imperfect third person sing. of 'become'
-altijd         |  always
-doch           |  yet, but etc
-wordt          |  present third person sing. of 'become'
-wezen          |  (1) to be, (2) 'been' as in 'been fishing', (3) orphans
-kunnen         |  to be able
-ons            |  us/our
-zelf           |  self
-tegen          |  against, towards, at
-na             |  after, near
-reeds          |  already
-wil            |  (1) present tense of 'want', (2) 'will', noun, (3) fender
-kon            |  could; past tense of 'to be able'
-niets          |  nothing
-uw             |  your
-iemand         |  somebody
-geweest        |  been; past participle of 'be'
-andere         |  other
diff --git a/solr/example/files/conf/lang/stopwords_no.txt b/solr/example/files/conf/lang/stopwords_no.txt
deleted file mode 100644
index a7a2c28..0000000
--- a/solr/example/files/conf/lang/stopwords_no.txt
+++ /dev/null
@@ -1,194 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/norwegian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Norwegian stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This stop word list is for the dominant bokmål dialect. Words unique
- | to nynorsk are marked *.
-
- | Revised by Jan Bruusgaard <Jan.Bruusgaard@ssb.no>, Jan 2005
-
-og             | and
-i              | in
-jeg            | I
-det            | it/this/that
-at             | to (w. inf.)
-en             | a/an
-et             | a/an
-den            | it/this/that
-til            | to
-er             | is/am/are
-som            | who/that
-på             | on
-de             | they / you(formal)
-med            | with
-han            | he
-av             | of
-ikke           | not
-ikkje          | not *
-der            | there
-så             | so
-var            | was/were
-meg            | me
-seg            | you
-men            | but
-ett            | one
-har            | have
-om             | about
-vi             | we
-min            | my
-mitt           | my
-ha             | have
-hadde          | had
-hun            | she
-nå             | now
-over           | over
-da             | when/as
-ved            | by/know
-fra            | from
-du             | you
-ut             | out
-sin            | your
-dem            | them
-oss            | us
-opp            | up
-man            | you/one
-kan            | can
-hans           | his
-hvor           | where
-eller          | or
-hva            | what
-skal           | shall/must
-selv           | self (reflective)
-sjøl           | self (reflective)
-her            | here
-alle           | all
-vil            | will
-bli            | become
-ble            | became
-blei           | became *
-blitt          | have become
-kunne          | could
-inn            | in
-når            | when
-være           | be
-kom            | come
-noen           | some
-noe            | some
-ville          | would
-dere           | you
-som            | who/which/that
-deres          | their/theirs
-kun            | only/just
-ja             | yes
-etter          | after
-ned            | down
-skulle         | should
-denne          | this
-for            | for/because
-deg            | you
-si             | hers/his
-sine           | hers/his
-sitt           | hers/his
-mot            | against
-å              | to
-meget          | much
-hvorfor        | why
-dette          | this
-disse          | these/those
-uten           | without
-hvordan        | how
-ingen          | none
-din            | your
-ditt           | your
-blir           | become
-samme          | same
-hvilken        | which
-hvilke         | which (plural)
-sånn           | such a
-inni           | inside/within
-mellom         | between
-vår            | our
-hver           | each
-hvem           | who
-vors           | us/ours
-hvis           | whose
-både           | both
-bare           | only/just
-enn            | than
-fordi          | as/because
-før            | before
-mange          | many
-også           | also
-slik           | just
-vært           | been
-være           | to be
-båe            | both *
-begge          | both
-siden          | since
-dykk           | your *
-dykkar         | yours *
-dei            | they *
-deira          | them *
-deires         | theirs *
-deim           | them *
-di             | your (fem.) *
-då             | as/when *
-eg             | I *
-ein            | a/an *
-eit            | a/an *
-eitt           | a/an *
-elles          | or *
-honom          | he *
-hjå            | at *
-ho             | she *
-hoe            | she *
-henne          | her
-hennar         | her/hers
-hennes         | hers
-hoss           | how *
-hossen         | how *
-ikkje          | not *
-ingi           | noone *
-inkje          | noone *
-korleis        | how *
-korso          | how *
-kva            | what/which *
-kvar           | where *
-kvarhelst      | where *
-kven           | who/whom *
-kvi            | why *
-kvifor         | why *
-me             | we *
-medan          | while *
-mi             | my *
-mine           | my *
-mykje          | much *
-no             | now *
-nokon          | some (masc./neut.) *
-noka           | some (fem.) *
-nokor          | some *
-noko           | some *
-nokre          | some *
-si             | his/hers *
-sia            | since *
-sidan          | since *
-so             | so *
-somt           | some *
-somme          | some *
-um             | about*
-upp            | up *
-vere           | be *
-vore           | was *
-verte          | become *
-vort           | become *
-varte          | became *
-vart           | became *
-
diff --git a/solr/example/files/conf/lang/stopwords_pt.txt b/solr/example/files/conf/lang/stopwords_pt.txt
deleted file mode 100644
index acfeb01..0000000
--- a/solr/example/files/conf/lang/stopwords_pt.txt
+++ /dev/null
@@ -1,253 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/portuguese/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Portuguese stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-
- | The following is a ranked list (commonest to rarest) of stopwords
- | deriving from a large sample of text.
-
- | Extra words have been added at the end.
-
-de             |  of, from
-a              |  the; to, at; her
-o              |  the; him
-que            |  who, that
-e              |  and
-do             |  de + o
-da             |  de + a
-em             |  in
-um             |  a
-para           |  for
-  | é          from SER
-com            |  with
-não            |  not, no
-uma            |  a
-os             |  the; them
-no             |  em + o
-se             |  himself etc
-na             |  em + a
-por            |  for
-mais           |  more
-as             |  the; them
-dos            |  de + os
-como           |  as, like
-mas            |  but
-  | foi        from SER
-ao             |  a + o
-ele            |  he
-das            |  de + as
-  | tem        from TER
-à              |  a + a
-seu            |  his
-sua            |  her
-ou             |  or
-  | ser        from SER
-quando         |  when
-muito          |  much
-  | há         from HAV
-nos            |  em + os; us
-já             |  already, now
-  | está       from EST
-eu             |  I
-também         |  also
-só             |  only, just
-pelo           |  per + o
-pela           |  per + a
-até            |  up to
-isso           |  that
-ela            |  he
-entre          |  between
-  | era        from SER
-depois         |  after
-sem            |  without
-mesmo          |  same
-aos            |  a + os
-  | ter        from TER
-seus           |  his
-quem           |  whom
-nas            |  em + as
-me             |  me
-esse           |  that
-eles           |  they
-  | estão      from EST
-você           |  you
-  | tinha      from TER
-  | foram      from SER
-essa           |  that
-num            |  em + um
-nem            |  nor
-suas           |  her
-meu            |  my
-às             |  a + as
-minha          |  my
-  | têm        from TER
-numa           |  em + uma
-pelos          |  per + os
-elas           |  they
-  | havia      from HAV
-  | seja       from SER
-qual           |  which
-  | será       from SER
-nós            |  we
-  | tenho      from TER
-lhe            |  to him, her
-deles          |  of them
-essas          |  those
-esses          |  those
-pelas          |  per + as
-este           |  this
-  | fosse      from SER
-dele           |  of him
-
- | other words. There are many contractions such as naquele = em+aquele,
- | mo = me+o, but they are rare.
- | Indefinite article plural forms are also rare.
-
-tu             |  thou
-te             |  thee
-vocês          |  you (plural)
-vos            |  you
-lhes           |  to them
-meus           |  my
-minhas
-teu            |  thy
-tua
-teus
-tuas
-nosso          | our
-nossa
-nossos
-nossas
-
-dela           |  of her
-delas          |  of them
-
-esta           |  this
-estes          |  these
-estas          |  these
-aquele         |  that
-aquela         |  that
-aqueles        |  those
-aquelas        |  those
-isto           |  this
-aquilo         |  that
-
-               | forms of estar, to be (not including the infinitive):
-estou
-está
-estamos
-estão
-estive
-esteve
-estivemos
-estiveram
-estava
-estávamos
-estavam
-estivera
-estivéramos
-esteja
-estejamos
-estejam
-estivesse
-estivéssemos
-estivessem
-estiver
-estivermos
-estiverem
-
-               | forms of haver, to have (not including the infinitive):
-hei
-há
-havemos
-hão
-houve
-houvemos
-houveram
-houvera
-houvéramos
-haja
-hajamos
-hajam
-houvesse
-houvéssemos
-houvessem
-houver
-houvermos
-houverem
-houverei
-houverá
-houveremos
-houverão
-houveria
-houveríamos
-houveriam
-
-               | forms of ser, to be (not including the infinitive):
-sou
-somos
-são
-era
-éramos
-eram
-fui
-foi
-fomos
-foram
-fora
-fôramos
-seja
-sejamos
-sejam
-fosse
-fôssemos
-fossem
-for
-formos
-forem
-serei
-será
-seremos
-serão
-seria
-seríamos
-seriam
-
-               | forms of ter, to have (not including the infinitive):
-tenho
-tem
-temos
-tém
-tinha
-tínhamos
-tinham
-tive
-teve
-tivemos
-tiveram
-tivera
-tivéramos
-tenha
-tenhamos
-tenham
-tivesse
-tivéssemos
-tivessem
-tiver
-tivermos
-tiverem
-terei
-terá
-teremos
-terão
-teria
-teríamos
-teriam
diff --git a/solr/example/files/conf/lang/stopwords_ro.txt b/solr/example/files/conf/lang/stopwords_ro.txt
deleted file mode 100644
index 4fdee90..0000000
--- a/solr/example/files/conf/lang/stopwords_ro.txt
+++ /dev/null
@@ -1,233 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-acea
-aceasta
-această
-aceea
-acei
-aceia
-acel
-acela
-acele
-acelea
-acest
-acesta
-aceste
-acestea
-aceşti
-aceştia
-acolo
-acum
-ai
-aia
-aibă
-aici
-al
-ăla
-ale
-alea
-ălea
-altceva
-altcineva
-am
-ar
-are
-aş
-aşadar
-asemenea
-asta
-ăsta
-astăzi
-astea
-ăstea
-ăştia
-asupra
-aţi
-au
-avea
-avem
-aveţi
-azi
-bine
-bucur
-bună
-ca
-că
-căci
-când
-care
-cărei
-căror
-cărui
-cât
-câte
-câţi
-către
-câtva
-ce
-cel
-ceva
-chiar
-cînd
-cine
-cineva
-cît
-cîte
-cîţi
-cîtva
-contra
-cu
-cum
-cumva
-curând
-curînd
-da
-dă
-dacă
-dar
-datorită
-de
-deci
-deja
-deoarece
-departe
-deşi
-din
-dinaintea
-dintr
-dintre
-drept
-după
-ea
-ei
-el
-ele
-eram
-este
-eşti
-eu
-face
-fără
-fi
-fie
-fiecare
-fii
-fim
-fiţi
-iar
-ieri
-îi
-îl
-îmi
-împotriva
-în 
-înainte
-înaintea
-încât
-încît
-încotro
-între
-întrucât
-întrucît
-îţi
-la
-lângă
-le
-li
-lîngă
-lor
-lui
-mă
-mâine
-mea
-mei
-mele
-mereu
-meu
-mi
-mine
-mult
-multă
-mulţi
-ne
-nicăieri
-nici
-nimeni
-nişte
-noastră
-noastre
-noi
-noştri
-nostru
-nu
-ori
-oricând
-oricare
-oricât
-orice
-oricînd
-oricine
-oricît
-oricum
-oriunde
-până
-pe
-pentru
-peste
-pînă
-poate
-pot
-prea
-prima
-primul
-prin
-printr
-sa
-să
-săi
-sale
-sau
-său
-se
-şi
-sînt
-sîntem
-sînteţi
-spre
-sub
-sunt
-suntem
-sunteţi
-ta
-tăi
-tale
-tău
-te
-ţi
-ţie
-tine
-toată
-toate
-tot
-toţi
-totuşi
-tu
-un
-una
-unde
-undeva
-unei
-unele
-uneori
-unor
-vă
-vi
-voastră
-voastre
-voi
-voştri
-vostru
-vouă
-vreo
-vreun
diff --git a/solr/example/files/conf/lang/stopwords_ru.txt b/solr/example/files/conf/lang/stopwords_ru.txt
deleted file mode 100644
index 5527140..0000000
--- a/solr/example/files/conf/lang/stopwords_ru.txt
+++ /dev/null
@@ -1,243 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/russian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | a russian stop word list. comments begin with vertical bar. each stop
- | word is at the start of a line.
-
- | this is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
- | letter `ё' is translated to `е'.
-
-и              | and
-в              | in/into
-во             | alternative form
-не             | not
-что            | what/that
-он             | he
-на             | on/onto
-я              | i
-с              | from
-со             | alternative form
-как            | how
-а              | milder form of `no' (but)
-то             | conjunction and form of `that'
-все            | all
-она            | she
-так            | so, thus
-его            | him
-но             | but
-да             | yes/and
-ты             | thou
-к              | towards, by
-у              | around, chez
-же             | intensifier particle
-вы             | you
-за             | beyond, behind
-бы             | conditional/subj. particle
-по             | up to, along
-только         | only
-ее             | her
-мне            | to me
-было           | it was
-вот            | here is/are, particle
-от             | away from
-меня           | me
-еще            | still, yet, more
-нет            | no, there isnt/arent
-о              | about
-из             | out of
-ему            | to him
-теперь         | now
-когда          | when
-даже           | even
-ну             | so, well
-вдруг          | suddenly
-ли             | interrogative particle
-если           | if
-уже            | already, but homonym of `narrower'
-или            | or
-ни             | neither
-быть           | to be
-был            | he was
-него           | prepositional form of его
-до             | up to
-вас            | you accusative
-нибудь         | indef. suffix preceded by hyphen
-опять          | again
-уж             | already, but homonym of `adder'
-вам            | to you
-сказал         | he said
-ведь           | particle `after all'
-там            | there
-потом          | then
-себя           | oneself
-ничего         | nothing
-ей             | to her
-может          | usually with `быть' as `maybe'
-они            | they
-тут            | here
-где            | where
-есть           | there is/are
-надо           | got to, must
-ней            | prepositional form of  ей
-для            | for
-мы             | we
-тебя           | thee
-их             | them, their
-чем            | than
-была           | she was
-сам            | self
-чтоб           | in order to
-без            | without
-будто          | as if
-человек        | man, person, one
-чего           | genitive form of `what'
-раз            | once
-тоже           | also
-себе           | to oneself
-под            | beneath
-жизнь          | life
-будет          | will be
-ж              | short form of intensifer particle `же'
-тогда          | then
-кто            | who
-этот           | this
-говорил        | was saying
-того           | genitive form of `that'
-потому         | for that reason
-этого          | genitive form of `this'
-какой          | which
-совсем         | altogether
-ним            | prepositional form of `его', `они'
-здесь          | here
-этом           | prepositional form of `этот'
-один           | one
-почти          | almost
-мой            | my
-тем            | instrumental/dative plural of `тот', `то'
-чтобы          | full form of `in order that'
-нее            | her (acc.)
-кажется        | it seems
-сейчас         | now
-были           | they were
-куда           | where to
-зачем          | why
-сказать        | to say
-всех           | all (acc., gen. preposn. plural)
-никогда        | never
-сегодня        | today
-можно          | possible, one can
-при            | by
-наконец        | finally
-два            | two
-об             | alternative form of `о', about
-другой         | another
-хоть           | even
-после          | after
-над            | above
-больше         | more
-тот            | that one (masc.)
-через          | across, in
-эти            | these
-нас            | us
-про            | about
-всего          | in all, only, of all
-них            | prepositional form of `они' (they)
-какая          | which, feminine
-много          | lots
-разве          | interrogative particle
-сказала        | she said
-три            | three
-эту            | this, acc. fem. sing.
-моя            | my, feminine
-впрочем        | moreover, besides
-хорошо         | good
-свою           | ones own, acc. fem. sing.
-этой           | oblique form of `эта', fem. `this'
-перед          | in front of
-иногда         | sometimes
-лучше          | better
-чуть           | a little
-том            | preposn. form of `that one'
-нельзя         | one must not
-такой          | such a one
-им             | to them
-более          | more
-всегда         | always
-конечно        | of course
-всю            | acc. fem. sing of `all'
-между          | between
-
-
-  | b: some paradigms
-  |
-  | personal pronouns
-  |
-  | я  меня  мне  мной  [мною]
-  | ты  тебя  тебе  тобой  [тобою]
-  | он  его  ему  им  [него, нему, ним]
-  | она  ее  эи  ею  [нее, нэи, нею]
-  | оно  его  ему  им  [него, нему, ним]
-  |
-  | мы  нас  нам  нами
-  | вы  вас  вам  вами
-  | они  их  им  ими  [них, ним, ними]
-  |
-  |   себя  себе  собой   [собою]
-  |
-  | demonstrative pronouns: этот (this), тот (that)
-  |
-  | этот  эта  это  эти
-  | этого  эты  это  эти
-  | этого  этой  этого  этих
-  | этому  этой  этому  этим
-  | этим  этой  этим  [этою]  этими
-  | этом  этой  этом  этих
-  |
-  | тот  та  то  те
-  | того  ту  то  те
-  | того  той  того  тех
-  | тому  той  тому  тем
-  | тем  той  тем  [тою]  теми
-  | том  той  том  тех
-  |
-  | determinative pronouns
-  |
-  | (a) весь (all)
-  |
-  | весь  вся  все  все
-  | всего  всю  все  все
-  | всего  всей  всего  всех
-  | всему  всей  всему  всем
-  | всем  всей  всем  [всею]  всеми
-  | всем  всей  всем  всех
-  |
-  | (b) сам (himself etc)
-  |
-  | сам  сама  само  сами
-  | самого саму  само  самих
-  | самого самой самого  самих
-  | самому самой самому  самим
-  | самим  самой  самим  [самою]  самими
-  | самом самой самом  самих
-  |
-  | stems of verbs `to be', `to have', `to do' and modal
-  |
-  | быть  бы  буд  быв  есть  суть
-  | име
-  | дел
-  | мог   мож  мочь
-  | уме
-  | хоч  хот
-  | долж
-  | можн
-  | нужн
-  | нельзя
-
diff --git a/solr/example/files/conf/lang/stopwords_sv.txt b/solr/example/files/conf/lang/stopwords_sv.txt
deleted file mode 100644
index 096f87f..0000000
--- a/solr/example/files/conf/lang/stopwords_sv.txt
+++ /dev/null
@@ -1,133 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/swedish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Swedish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
- | Swedish stop words occasionally exhibit homonym clashes. For example
- |  så = so, but also seed. These are indicated clearly below.
-
-och            | and
-det            | it, this/that
-att            | to (with infinitive)
-i              | in, at
-en             | a
-jag            | I
-hon            | she
-som            | who, that
-han            | he
-på             | on
-den            | it, this/that
-med            | with
-var            | where, each
-sig            | him(self) etc
-för            | for
-så             | so (also: seed)
-till           | to
-är             | is
-men            | but
-ett            | a
-om             | if; around, about
-hade           | had
-de             | they, these/those
-av             | of
-icke           | not, no
-mig            | me
-du             | you
-henne          | her
-då             | then, when
-sin            | his
-nu             | now
-har            | have
-inte           | inte någon = no one
-hans           | his
-honom          | him
-skulle         | 'sake'
-hennes         | her
-där            | there
-min            | my
-man            | one (pronoun)
-ej             | nor
-vid            | at, by, on (also: vast)
-kunde          | could
-något          | some etc
-från           | from, off
-ut             | out
-när            | when
-efter          | after, behind
-upp            | up
-vi             | we
-dem            | them
-vara           | be
-vad            | what
-över           | over
-än             | than
-dig            | you
-kan            | can
-sina           | his
-här            | here
-ha             | have
-mot            | towards
-alla           | all
-under          | under (also: wonder)
-någon          | some etc
-eller          | or (else)
-allt           | all
-mycket         | much
-sedan          | since
-ju             | why
-denna          | this/that
-själv          | myself, yourself etc
-detta          | this/that
-åt             | to
-utan           | without
-varit          | was
-hur            | how
-ingen          | no
-mitt           | my
-ni             | you
-bli            | to be, become
-blev           | from bli
-oss            | us
-din            | thy
-dessa          | these/those
-några          | some etc
-deras          | their
-blir           | from bli
-mina           | my
-samma          | (the) same
-vilken         | who, that
-er             | you, your
-sådan          | such a
-vår            | our
-blivit         | from bli
-dess           | its
-inom           | within
-mellan         | between
-sådant         | such a
-varför         | why
-varje          | each
-vilka          | who, that
-ditt           | thy
-vem            | who
-vilket         | who, that
-sitta          | his
-sådana         | such a
-vart           | each
-dina           | thy
-vars           | whose
-vårt           | our
-våra           | our
-ert            | your
-era            | your
-vilkas         | whose
-
diff --git a/solr/example/files/conf/lang/stopwords_th.txt b/solr/example/files/conf/lang/stopwords_th.txt
deleted file mode 100644
index 07f0fab..0000000
--- a/solr/example/files/conf/lang/stopwords_th.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-# Thai stopwords from:
-# "Opinion Detection in Thai Political News Columns
-# Based on Subjectivity Analysis"
-# Khampol Sukhum, Supot Nitsuwat, and Choochart Haruechaiyasak
-ไว้
-ไม่
-ไป
-ได้
-ให้
-ใน
-โดย
-แห่ง
-แล้ว
-และ
-แรก
-แบบ
-แต่
-เอง
-เห็น
-เลย
-เริ่ม
-เรา
-เมื่อ
-เพื่อ
-เพราะ
-เป็นการ
-เป็น
-เปิดเผย
-เปิด
-เนื่องจาก
-เดียวกัน
-เดียว
-เช่น
-เฉพาะ
-เคย
-เข้า
-เขา
-อีก
-อาจ
-อะไร
-ออก
-อย่าง
-อยู่
-อยาก
-หาก
-หลาย
-หลังจาก
-หลัง
-หรือ
-หนึ่ง
-ส่วน
-ส่ง
-สุด
-สําหรับ
-ว่า
-วัน
-ลง
-ร่วม
-ราย
-รับ
-ระหว่าง
-รวม
-ยัง
-มี
-มาก
-มา
-พร้อม
-พบ
-ผ่าน
-ผล
-บาง
-น่า
-นี้
-นํา
-นั้น
-นัก
-นอกจาก
-ทุก
-ที่สุด
-ที่
-ทําให้
-ทํา
-ทาง
-ทั้งนี้
-ทั้ง
-ถ้า
-ถูก
-ถึง
-ต้อง
-ต่างๆ
-ต่าง
-ต่อ
-ตาม
-ตั้งแต่
-ตั้ง
-ด้าน
-ด้วย
-ดัง
-ซึ่ง
-ช่วง
-จึง
-จาก
-จัด
-จะ
-คือ
-ความ
-ครั้ง
-คง
-ขึ้น
-ของ
-ขอ
-ขณะ
-ก่อน
-ก็
-การ
-กับ
-กัน
-กว่า
-กล่าว
diff --git a/solr/example/files/conf/lang/stopwords_tr.txt b/solr/example/files/conf/lang/stopwords_tr.txt
deleted file mode 100644
index 84d9408..0000000
--- a/solr/example/files/conf/lang/stopwords_tr.txt
+++ /dev/null
@@ -1,212 +0,0 @@
-# Turkish stopwords from LUCENE-559
-# merged with the list from "Information Retrieval on Turkish Texts"
-#   (http://www.users.muohio.edu/canf/papers/JASIST2008offPrint.pdf)
-acaba
-altmış
-altı
-ama
-ancak
-arada
-aslında
-ayrıca
-bana
-bazı
-belki
-ben
-benden
-beni
-benim
-beri
-beş
-bile
-bin
-bir
-birçok
-biri
-birkaç
-birkez
-birşey
-birşeyi
-biz
-bize
-bizden
-bizi
-bizim
-böyle
-böylece
-bu
-buna
-bunda
-bundan
-bunlar
-bunları
-bunların
-bunu
-bunun
-burada
-çok
-çünkü
-da
-daha
-dahi
-de
-defa
-değil
-diğer
-diye
-doksan
-dokuz
-dolayı
-dolayısıyla
-dört
-edecek
-eden
-ederek
-edilecek
-ediliyor
-edilmesi
-ediyor
-eğer
-elli
-en
-etmesi
-etti
-ettiği
-ettiğini
-gibi
-göre
-halen
-hangi
-hatta
-hem
-henüz
-hep
-hepsi
-her
-herhangi
-herkesin
-hiç
-hiçbir
-için
-iki
-ile
-ilgili
-ise
-işte
-itibaren
-itibariyle
-kadar
-karşın
-katrilyon
-kendi
-kendilerine
-kendini
-kendisi
-kendisine
-kendisini
-kez
-ki
-kim
-kimden
-kime
-kimi
-kimse
-kırk
-milyar
-milyon
-mu
-mü
-mı
-nasıl
-ne
-neden
-nedenle
-nerde
-nerede
-nereye
-niye
-niçin
-o
-olan
-olarak
-oldu
-olduğu
-olduğunu
-olduklarını
-olmadı
-olmadığı
-olmak
-olması
-olmayan
-olmaz
-olsa
-olsun
-olup
-olur
-olursa
-oluyor
-on
-ona
-ondan
-onlar
-onlardan
-onları
-onların
-onu
-onun
-otuz
-oysa
-öyle
-pek
-rağmen
-sadece
-sanki
-sekiz
-seksen
-sen
-senden
-seni
-senin
-siz
-sizden
-sizi
-sizin
-şey
-şeyden
-şeyi
-şeyler
-şöyle
-şu
-şuna
-şunda
-şundan
-şunları
-şunu
-tarafından
-trilyon
-tüm
-üç
-üzere
-var
-vardı
-ve
-veya
-ya
-yani
-yapacak
-yapılan
-yapılması
-yapıyor
-yapmak
-yaptı
-yaptığı
-yaptığını
-yaptıkları
-yedi
-yerine
-yetmiş
-yine
-yirmi
-yoksa
-yüz
-zaten
diff --git a/solr/example/files/conf/lang/userdict_ja.txt b/solr/example/files/conf/lang/userdict_ja.txt
deleted file mode 100644
index 6f0368e..0000000
--- a/solr/example/files/conf/lang/userdict_ja.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# This is a sample user dictionary for Kuromoji (JapaneseTokenizer)
-#
-# Add entries to this file in order to override the statistical model in terms
-# of segmentation, readings and part-of-speech tags.  Notice that entries do
-# not have weights since they are always used when found.  This is by-design
-# in order to maximize ease-of-use.
-#
-# Entries are defined using the following CSV format:
-#  <text>,<token 1> ... <token n>,<reading 1> ... <reading n>,<part-of-speech tag>
-#
-# Notice that a single half-width space separates tokens and readings, and
-# that the number tokens and readings must match exactly.
-#
-# Also notice that multiple entries with the same <text> is undefined.
-#
-# Whitespace only lines are ignored.  Comments are not allowed on entry lines.
-#
-
-# Custom segmentation for kanji compounds
-日本経済新聞,日本 経済 新聞,ニホン ケイザイ シンブン,カスタム名詞
-関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,カスタム名詞
-
-# Custom segmentation for compound katakana
-トートバッグ,トート バッグ,トート バッグ,かずカナ名詞
-ショルダーバッグ,ショルダー バッグ,ショルダー バッグ,かずカナ名詞
-
-# Custom reading for former sumo wrestler
-朝青龍,朝青龍,アサショウリュウ,カスタム人名
diff --git a/solr/example/files/conf/managed-schema b/solr/example/files/conf/managed-schema
deleted file mode 100644
index 5ad9883..0000000
--- a/solr/example/files/conf/managed-schema
+++ /dev/null
@@ -1,520 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Solr managed schema - automatically generated - DO NOT EDIT -->
-<schema name="example-data-driven-schema" version="1.6">
-  <uniqueKey>id</uniqueKey>
-  <fieldType name="ancestor_path" class="solr.TextField">
-    <analyzer type="index">
-      <tokenizer name="keyword"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="pathHierarchy" delimiter="/"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="binary" class="solr.BinaryField"/>
-  <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
-  <fieldType name="booleans" class="solr.BoolField" sortMissingLast="true" multiValued="true"/>
-  <fieldType name="currency" class="solr.CurrencyFieldType" amountLongSuffix="_l_ns" codeStrSuffix="_s_ns" defaultCurrency="USD" currencyConfig="currency.xml" />
-  <fieldType name="descendent_path" class="solr.TextField">
-    <analyzer type="index">
-      <tokenizer name="pathHierarchy" delimiter="/"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="keyword"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="ignored" class="solr.StrField" indexed="false" stored="false" multiValued="true"/>
-  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
-  <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType" geo="true" maxDistErr="0.001" distErrPct="0.025" distanceUnits="kilometers"/>
-  <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="keyword"/>
-      <filter name="lowercase"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="phonetic_en" class="solr.TextField" indexed="true" stored="false">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="doubleMetaphone" inject="false"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="pdate" class="solr.DatePointField" docValues="true"/>
-  <fieldType name="pdates" class="solr.DatePointField" docValues="true" multiValued="true"/>
-  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
-  <fieldType name="pdoubles" class="solr.DoublePointField" docValues="true" multiValued="true"/>
-  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
-  <fieldType name="pfloats" class="solr.FloatPointField" docValues="true" multiValued="true"/>
-  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
-  <fieldType name="pints" class="solr.IntPointField" docValues="true" multiValued="true"/>
-  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
-  <fieldType name="plongs" class="solr.LongPointField" docValues="true" multiValued="true"/>
-  <fieldType name="point" class="solr.PointType" subFieldSuffix="_d" dimension="2"/>
-  <fieldType name="random" class="solr.RandomSortField" indexed="true"/>
-  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
-  <fieldType name="strings" class="solr.StrField" sortMissingLast="true" multiValued="true"/>
-  <fieldType name="text_ar" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_ar.txt" ignoreCase="true"/>
-      <filter name="arabicNormalization"/>
-      <filter name="arabicStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_bg" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_bg.txt" ignoreCase="true"/>
-      <filter name="bulgarianStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ca" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="elision" articles="lang/contractions_ca.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_ca.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Catalan"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_cjk" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="cjkWidth"/>
-      <filter name="lowercase"/>
-      <filter name="cjkBigram"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_cz" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_cz.txt" ignoreCase="true"/>
-      <filter name="czechStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_da" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_da.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Danish"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_de.txt" ignoreCase="true"/>
-      <filter name="germanNormalization"/>
-      <filter name="germanLightStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_el" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="greekLowercase"/>
-      <filter name="stop" words="lang/stopwords_el.txt" ignoreCase="false"/>
-      <filter name="greekStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer name="standard"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="englishPossessive"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="porterStem"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="standard"/>
-      <filter name="synonymGraph" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="englishPossessive"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="porterStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_en_splitting" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer name="whitespace"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="wordDelimiterGraph" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="1"/>
-      <filter name="lowercase"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="porterStem"/>
-      <filter name="flattenGraph" />
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="whitespace"/>
-      <filter name="synonymGraph" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="wordDelimiterGraph" catenateNumbers="0" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="0"/>
-      <filter name="lowercase"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="porterStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_en_splitting_tight" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer name="whitespace"/>
-      <filter name="synonymGraph" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="wordDelimiterGraph" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
-      <filter name="lowercase"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="englishMinimalStem"/>
-      <filter name="removeDuplicates"/>
-      <filter name="flattenGraph" />
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="whitespace"/>
-      <filter name="synonymGraph" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter name="wordDelimiterGraph" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
-      <filter name="lowercase"/>
-      <filter name="keywordMarker" protected="protwords.txt"/>
-      <filter name="englishMinimalStem"/>
-      <filter name="removeDuplicates"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_es" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_es.txt" ignoreCase="true"/>
-      <filter name="spanishLightStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_eu" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_eu.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Basque"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <charFilter name="persian"/>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="arabicNormalization"/>
-      <filter name="persianNormalization"/>
-      <filter name="stop" words="lang/stopwords_fa.txt" ignoreCase="true"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_fi" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_fi.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Finnish"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_fr" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="elision" articles="lang/contractions_fr.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_fr.txt" ignoreCase="true"/>
-      <filter name="frenchLightStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="elision" articles="lang/contractions_ga.txt" ignoreCase="true"/>
-      <filter name="stop" words="lang/hyphenations_ga.txt" ignoreCase="true"/>
-      <filter name="irishLowercase"/>
-      <filter name="stop" words="lang/stopwords_ga.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Irish"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100" multiValued="true">
-    <analyzer type="index">
-      <tokenizer name="standard"/>
-      <filter name="stop" words="stopwords.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="standard"/>
-      <filter name="stop" words="stopwords.txt" ignoreCase="true"/>
-      <filter name="synonymGraph" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="lowercase"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
-    <analyzer type="index">
-      <tokenizer name="standard"/>
-      <filter name="stop" words="stopwords.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="reversedWildcard" maxPosQuestion="2" maxFractionAsterisk="0.33" maxPosAsterisk="3" withOriginal="true"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="standard"/>
-      <filter name="synonymGraph" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
-      <filter name="stop" words="stopwords.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_gl" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_gl.txt" ignoreCase="true"/>
-      <filter name="galicianStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_hi" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="indicNormalization"/>
-      <filter name="hindiNormalization"/>
-      <filter name="stop" words="lang/stopwords_hi.txt" ignoreCase="true"/>
-      <filter name="hindiStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_hu" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_hu.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Hungarian"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_hy" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_hy.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Armenian"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_id" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_id.txt" ignoreCase="true"/>
-      <filter name="indonesianStem" stemDerivational="true"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_it" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="elision" articles="lang/contractions_it.txt" ignoreCase="true"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_it.txt" ignoreCase="true"/>
-      <filter name="italianLightStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ja" class="solr.TextField" autoGeneratePhraseQueries="false" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="japanese" mode="search"/>
-      <filter name="japaneseBaseForm"/>
-      <filter name="japanesePartOfSpeechStop" tags="lang/stoptags_ja.txt"/>
-      <filter name="cjkWidth"/>
-      <filter name="stop" words="lang/stopwords_ja.txt" ignoreCase="true"/>
-      <filter name="japaneseKatakanaStem" minimumLength="4"/>
-      <filter name="lowercase"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ko" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="korean" decompoundMode="discard" outputUnknownUnigrams="false"/>
-      <filter name="koreanPartOfSpeechStop" />
-      <filter name="koreanReadingForm" />
-      <filter name="lowercase" />
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_lv" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_lv.txt" ignoreCase="true"/>
-      <filter name="latvianStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_nl" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_nl.txt" ignoreCase="true"/>
-      <filter name="stemmerOverride" dictionary="lang/stemdict_nl.txt" ignoreCase="false"/>
-      <filter name="snowballPorter" language="Dutch"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_no" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_no.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Norwegian"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_pt" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_pt.txt" ignoreCase="true"/>
-      <filter name="portugueseLightStem"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ro" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_ro.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Romanian"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ru" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_ru.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Russian"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_sv" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="lowercase"/>
-      <filter name="stop" format="snowball" words="lang/stopwords_sv.txt" ignoreCase="true"/>
-      <filter name="snowballPorter" language="Swedish"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_th" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="thai"/>
-      <filter name="lowercase"/>
-      <filter name="stop" words="lang/stopwords_th.txt" ignoreCase="true"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_tr" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="standard"/>
-      <filter name="turkishLowercase"/>
-      <filter name="stop" words="lang/stopwords_tr.txt" ignoreCase="false"/>
-      <filter name="snowballPorter" language="Turkish"/>
-    </analyzer>
-  </fieldType>
-  <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
-      <tokenizer name="whitespace"/>
-    </analyzer>
-  </fieldType>
-
-  <fieldType name="text_email_url" class="solr.TextField">
-    <analyzer>
-      <tokenizer name="UAX29URLEmail"/>
-      <filter name="type" types="email_url_types.txt" useWhitelist="true"/>
-    </analyzer>
-  </fieldType>
-
-  <fieldType name="text_shingles" class="solr.TextField" positionIncrementGap="100" multiValued="true">
-    <analyzer type="index">
-      <tokenizer name="standard"/>
-      <!-- <filter name="stop" words="lang/stopwords_en.txt" ignoreCase="false" /> -->
-      <filter name="length" min="2" max="18"/>
-      <filter name="lowercase"/>
-      <filter name="patternReplace" pattern="(^[^a-z]+$)" replacement="" replace="all"/>
-      <filter name="shingle" minShingleSize="3"  maxShingleSize="3"
-             outputUnigrams="false" outputUnigramsIfNoShingles="false" tokenSeparator=" " fillerToken="*"/>
-      <filter name="patternReplace" pattern="(.*[\*].*)"  replacement=""/>
-      <filter name="trim"/>
-
-      <!-- PRFF could have removed everything down to an empty string, remove if so -->
-      <filter name="length" min="1" max="100"/>
-    </analyzer>
-    <analyzer type="query">
-      <tokenizer name="keyword"/>
-      <filter name="lowercase"/>
-    </analyzer>
-  </fieldType>
-
-  <field name="id" type="string" multiValued="false" indexed="true" required="true" stored="true"/>
-  <field name="_version_" type="plong" indexed="true" stored="true"/>
-  <field name="content_type" type="string" indexed="true" stored="true"/>
-  <field name="doc_type" type="string" indexed="true" stored="true"/>
-  <field name="title" type="string" indexed="true" stored="true"/>
-  <field name="language" type="string" indexed="true" stored="true"/>
-  <field name="content" type="text_general" multiValued="false" indexed="true" stored="true"/>
-  <field name="text_shingles" type="text_shingles" indexed="true" stored="false"/>
-  <field name="_text_" type="text_general" multiValued="true" indexed="true" stored="false"/>
-
-  <dynamicField name="*_txt_en_split_tight" type="text_en_splitting_tight" indexed="true" stored="true"/>
-  <dynamicField name="*_descendent_path" type="descendent_path" indexed="true" stored="true"/>
-  <dynamicField name="*_ancestor_path" type="ancestor_path" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_en_split" type="text_en_splitting" indexed="true" stored="true"/>
-  <dynamicField name="*_coordinate" type="pdouble" indexed="true" stored="false"/>
-  <dynamicField name="ignored_*" type="ignored" multiValued="true"/>
-  <dynamicField name="*_txt_rev" type="text_general_rev" indexed="true" stored="true"/>
-  <dynamicField name="*_phon_en" type="phonetic_en" indexed="true" stored="true"/>
-  <dynamicField name="*_s_lower" type="lowercase" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_cjk" type="text_cjk" indexed="true" stored="true"/>
-  <dynamicField name="random_*" type="random"/>
-  <dynamicField name="*_txt_en" type="text_en" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ar" type="text_ar" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_bg" type="text_bg" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ca" type="text_ca" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_cz" type="text_cz" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_da" type="text_da" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_de" type="text_de" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_el" type="text_el" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_es" type="text_es" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_eu" type="text_eu" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_fa" type="text_fa" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_fi" type="text_fi" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_fr" type="text_fr" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ga" type="text_ga" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_gl" type="text_gl" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_hi" type="text_hi" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_hu" type="text_hu" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_hy" type="text_hy" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_id" type="text_id" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_it" type="text_it" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ja" type="text_ja" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ko" type="text_ko" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_lv" type="text_lv" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_nl" type="text_nl" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_no" type="text_no" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_pt" type="text_pt" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ro" type="text_ro" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_ru" type="text_ru" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_sv" type="text_sv" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_th" type="text_th" indexed="true" stored="true"/>
-  <dynamicField name="*_txt_tr" type="text_tr" indexed="true" stored="true"/>
-  <dynamicField name="*_point" type="point" indexed="true" stored="true"/>
-  <dynamicField name="*_srpt" type="location_rpt" indexed="true" stored="true"/>
-  <dynamicField name="attr_*" type="text_general" multiValued="true" indexed="true" stored="true"/>
-  <dynamicField name="*_l_ns" type="plong" indexed="true" stored="false"/>
-  <dynamicField name="*_s_ns" type="string" indexed="true" stored="false"/>
-  <dynamicField name="*_txt" type="text_general" indexed="true" stored="true"/>
-  <dynamicField name="*_dts" type="pdate" multiValued="true" indexed="true" stored="true"/>
-  <dynamicField name="*_is" type="pints" indexed="true" stored="true"/>
-  <dynamicField name="*_ss" type="strings" indexed="true" stored="true"/>
-  <dynamicField name="*_ls" type="plongs" indexed="true" stored="true"/>
-  <dynamicField name="*_bs" type="booleans" indexed="true" stored="true"/>
-  <dynamicField name="*_fs" type="pfloats" indexed="true" stored="true"/>
-  <dynamicField name="*_ds" type="pdoubles" indexed="true" stored="true"/>
-  <dynamicField name="*_dt" type="pdate" indexed="true" stored="true"/>
-  <dynamicField name="*_ws" type="text_ws" indexed="true" stored="true"/>
-  <dynamicField name="*_i" type="pint" indexed="true" stored="true"/>
-  <dynamicField name="*_s" type="string" indexed="true" stored="true"/>
-  <dynamicField name="*_l" type="plong" indexed="true" stored="true"/>
-  <dynamicField name="*_t" type="text_general" indexed="true" stored="true"/>
-  <dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
-  <dynamicField name="*_f" type="pfloat" indexed="true" stored="true"/>
-  <dynamicField name="*_d" type="pdouble" indexed="true" stored="true"/>
-  <dynamicField name="*_p" type="location" indexed="true" stored="true"/>
-  <dynamicField name="*_c" type="currency" indexed="true" stored="true"/>
-
-  <copyField source="content" dest="text_shingles"/>
-  <copyField source="*" dest="_text_"/>
-
-
-</schema>
diff --git a/solr/example/files/conf/params.json b/solr/example/files/conf/params.json
deleted file mode 100644
index 22aadcc..0000000
--- a/solr/example/files/conf/params.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{"params":{
-  "query":{
-    "defType":"edismax",
-    "q.alt":"*:*",
-    "rows":"10",
-    "fl":"*,score",
-    "":{"v":0}},
-  "facets":{
-    "facet":"on",
-    "facet.mincount":"1",
-    "f.doc_type.facet.mincount":"0",
-    "facet.field":["text_shingles","{!ex=type}doc_type", "language"],
-    "f.text_shingles.facet.limit":10,
-    "facet.query":"{!ex=type key=all_types}*:*",
-    "f.doc_type.facet.missing":true,
-    "":{"v":0}},
-  "browse":{
-    "type_fq":"{!field f=doc_type v=$type}",
-    "hl":"on",
-    "hl.fl":"content",
-    "v.locale":"${locale}",
-    "debug":"true",
-    "hl.simple.pre":"HL_START",
-    "hl.simple.post":"HL_END",
-    "echoParams": "explicit",
-    "_appends_": {
-      "fq": "{!switch v=$type tag=type case='*:*' case.all='*:*' case.unknown='-doc_type:[* TO *]' default=$type_fq}"
-    },
-    "":{"v":0}},
-  "velocity":{
-    "wt":"velocity",
-    "v.template":"browse",
-    "v.layout":"layout",
-    "":{"v":0}}}}
diff --git a/solr/example/files/conf/protwords.txt b/solr/example/files/conf/protwords.txt
deleted file mode 100644
index 1dfc0ab..0000000
--- a/solr/example/files/conf/protwords.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-# Use a protected word file to protect against the stemmer reducing two
-# unrelated words to the same base word.
-
-# Some non-words that normally won't be encountered,
-# just to test that they won't be stemmed.
-dontstems
-zwhacky
-
diff --git a/solr/example/files/conf/solrconfig.xml b/solr/example/files/conf/solrconfig.xml
deleted file mode 100644
index 5d7bedd..0000000
--- a/solr/example/files/conf/solrconfig.xml
+++ /dev/null
@@ -1,1423 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--
-     For more details about configurations options that may appear in
-     this file, see http://wiki.apache.org/solr/SolrConfigXml.
--->
-<config>
-  <!-- In all configuration below, a prefix of "solr." for class names
-       is an alias that causes solr to search appropriate packages,
-       including org.apache.solr.(search|update|request|core|analysis)
-
-       You may also specify a fully qualified Java classname if you
-       have your own custom plugins.
-    -->
-
-  <!-- Controls what version of Lucene various components of Solr
-       adhere to.  Generally, you want to use the latest version to
-       get all bug fixes and improvements. It is highly recommended
-       that you fully re-index after changing this setting as it can
-       affect both how text is indexed and queried.
-  -->
-  <luceneMatchVersion>9.0.0</luceneMatchVersion>
-
-  <!-- <lib/> directives can be used to instruct Solr to load any Jars
-       identified and use them to resolve any "plugins" specified in
-       your solrconfig.xml or schema.xml (ie: Analyzers, Request
-       Handlers, etc...).
-
-       All directories and paths are resolved relative to the
-       instanceDir.
-
-       Please note that <lib/> directives are processed in the order
-       that they appear in your solrconfig.xml file, and are "stacked"
-       on top of each other when building a ClassLoader - so if you have
-       plugin jars with dependencies on other jars, the "lower level"
-       dependency jars should be loaded first.
-
-       If a "./lib" directory exists in your instanceDir, all files
-       found in it are included as if you had used the following
-       syntax...
-
-              <lib dir="./lib" />
-    -->
-
-  <!-- A 'dir' option by itself adds any files found in the directory
-       to the classpath, this is useful for including all jars in a
-       directory.
-
-       When a 'regex' is specified in addition to a 'dir', only the
-       files in that directory which completely match the regex
-       (anchored on both ends) will be included.
-
-       If a 'dir' option (with or without a regex) is used and nothing
-       is found that matches, a warning will be logged.
-
-       The examples below can be used to load some solr-contribs along
-       with their external dependencies.
-    -->
-  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
-  <!-- browse-resources must come before solr-velocity JAR in order to override localized resources -->
-  <lib path="${solr.install.dir:../../../..}/example/files/browse-resources"/>
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
-  <!-- an exact 'path' can be used instead of a 'dir' to specify a
-       specific jar file.  This will cause a serious error to be logged
-       if it can't be loaded.
-    -->
-  <!--
-     <lib path="../a-jar-that-does-not-exist.jar" />
-  -->
-
-  <!-- Data Directory
-
-       Used to specify an alternate directory to hold all index data
-       other than the default ./data under the Solr home.  If
-       replication is in use, this should match the replication
-       configuration.
-    -->
-  <dataDir>${solr.data.dir:}</dataDir>
-
-
-  <!-- The DirectoryFactory to use for indexes.
-
-       solr.StandardDirectoryFactory is filesystem
-       based and tries to pick the best implementation for the current
-       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
-       wraps solr.StandardDirectoryFactory and caches small files in memory
-       for better NRT performance.
-
-       One can force a particular implementation via solr.MMapDirectoryFactory
-       or solr.NIOFSDirectoryFactory.
-
-       solr.RAMDirectoryFactory is memory based and not persistent.
-    -->
-  <directoryFactory name="DirectoryFactory"
-                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
-
-  <!-- The CodecFactory for defining the format of the inverted index.
-       The default implementation is SchemaCodecFactory, which is the official Lucene
-       index format, but hooks into the schema to provide per-field customization of
-       the postings lists and per-document values in the fieldType element
-       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, it's a good
-       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
-       before upgrading to a newer version to avoid unnecessary reindexing.
-  -->
-  <codecFactory class="solr.SchemaCodecFactory"/>
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Index Config - These settings control low-level behavior of indexing
-       Most example settings here show the default value, but are commented
-       out, to more easily see where customizations have been made.
-
-       Note: This replaces <indexDefaults> and <mainIndex> from older versions
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <indexConfig>
-    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
-         LimitTokenCountFilterFactory in your fieldType definition. E.g.
-     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
-    -->
-    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
-    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
-
-    <!-- Expert: Enabling compound file will use less files for the index,
-         using fewer file descriptors on the expense of performance decrease.
-         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
-    <!-- <useCompoundFile>false</useCompoundFile> -->
-
-    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
-         indexing for buffering added documents and deletions before they are
-         flushed to the Directory.
-         maxBufferedDocs sets a limit on the number of documents buffered
-         before flushing.
-         If both ramBufferSizeMB and maxBufferedDocs is set, then
-         Lucene will flush based on whichever limit is hit first.  -->
-    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
-    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
-
-    <!-- Expert: Merge Policy
-         The Merge Policy in Lucene controls how merging of segments is done.
-         The default since Solr/Lucene 3.3 is TieredMergePolicy.
-         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
-         Even older versions of Lucene used LogDocMergePolicy.
-      -->
-    <!--
-        <mergePolicyFactory class="solr.TieredMergePolicyFactory">
-          <int name="maxMergeAtOnce">10</int>
-          <int name="segmentsPerTier">10</int>
-        </mergePolicyFactory>
-     -->
-
-    <!-- Expert: Merge Scheduler
-         The Merge Scheduler in Lucene controls how merges are
-         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
-         can perform merges in the background using separate threads.
-         The SerialMergeScheduler (Lucene 2.2 default) does not.
-     -->
-    <!--
-       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
-       -->
-
-    <!-- LockFactory
-
-         This option specifies which Lucene LockFactory implementation
-         to use.
-
-         single = SingleInstanceLockFactory - suggested for a
-                  read-only index or when there is no possibility of
-                  another process trying to modify the index.
-         native = NativeFSLockFactory - uses OS native file locking.
-                  Do not use when multiple solr webapps in the same
-                  JVM are attempting to share a single index.
-         simple = SimpleFSLockFactory  - uses a plain file for locking
-
-         Defaults: 'native' is default for Solr3.6 and later, otherwise
-                   'simple' is the default
-
-         More details on the nuances of each LockFactory...
-         http://wiki.apache.org/lucene-java/AvailableLockFactories
-    -->
-    <lockType>${solr.lock.type:native}</lockType>
-
-    <!-- Commit Deletion Policy
-         Custom deletion policies can be specified here. The class must
-         implement org.apache.lucene.index.IndexDeletionPolicy.
-
-         The default Solr IndexDeletionPolicy implementation supports
-         deleting index commit points on number of commits, age of
-         commit point and optimized status.
-
-         The latest commit point should always be preserved regardless
-         of the criteria.
-    -->
-    <!--
-    <deletionPolicy class="solr.SolrDeletionPolicy">
-    -->
-    <!-- The number of commit points to be kept -->
-    <!-- <str name="maxCommitsToKeep">1</str> -->
-    <!-- The number of optimized commit points to be kept -->
-    <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
-    <!--
-        Delete all commit points once they have reached the given age.
-        Supports DateMathParser syntax e.g.
-      -->
-    <!--
-       <str name="maxCommitAge">30MINUTES</str>
-       <str name="maxCommitAge">1DAY</str>
-    -->
-    <!--
-    </deletionPolicy>
-    -->
-
-    <!-- Lucene Infostream
-
-         To aid in advanced debugging, Lucene provides an "InfoStream"
-         of detailed information when indexing.
-
-         Setting The value to true will instruct the underlying Lucene
-         IndexWriter to write its debugging info the specified file
-      -->
-    <!-- <infoStream file="INFOSTREAM.txt">false</infoStream> -->
-  </indexConfig>
-
-
-  <!-- JMX
-
-       This example enables JMX if and only if an existing MBeanServer
-       is found, use this if you want to configure JMX through JVM
-       parameters. Remove this to disable exposing Solr configuration
-       and statistics to JMX.
-
-       For more details see http://wiki.apache.org/solr/SolrJmx
-    -->
-  <jmx />
-  <!-- If you want to connect to a particular server, specify the
-       agentId
-    -->
-  <!-- <jmx agentId="myAgent" /> -->
-  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
-  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
-    -->
-
-  <!-- The default high-performance update handler -->
-  <updateHandler class="solr.DirectUpdateHandler2">
-
-    <!-- Enables a transaction log, used for real-time get, durability, and
-         and solr cloud replica recovery.  The log can grow as big as
-         uncommitted changes to the index, so use of a hard autoCommit
-         is recommended (see below).
-         "dir" - the target directory for transaction logs, defaults to the
-                solr data directory.  -->
-    <updateLog>
-      <str name="dir">${solr.ulog.dir:}</str>
-    </updateLog>
-
-    <!-- AutoCommit
-
-         Perform a hard commit automatically under certain conditions.
-         Instead of enabling autoCommit, consider using "commitWithin"
-         when adding documents.
-
-         http://wiki.apache.org/solr/UpdateXmlMessages
-
-         maxDocs - Maximum number of documents to add since the last
-                   commit before automatically triggering a new commit.
-
-         maxTime - Maximum amount of time in ms that is allowed to pass
-                   since a document was added before automatically
-                   triggering a new commit.
-         openSearcher - if false, the commit causes recent index changes
-           to be flushed to stable storage, but does not cause a new
-           searcher to be opened to make those changes visible.
-
-         If the updateLog is enabled, then it's highly recommended to
-         have some sort of hard autoCommit to limit the log size.
-      -->
-    <autoCommit>
-      <maxTime>15000</maxTime>
-      <openSearcher>false</openSearcher>
-    </autoCommit>
-
-    <!-- softAutoCommit is like autoCommit except it causes a
-         'soft' commit which only ensures that changes are visible
-         but does not ensure that data is synced to disk.  This is
-         faster and more near-realtime friendly than a hard commit.
-      -->
-    <!--
-      <autoSoftCommit>
-        <maxTime>1000</maxTime>
-      </autoSoftCommit>
-     -->
-
-    <!-- Update Related Event Listeners
-
-         Various IndexWriter related events can trigger Listeners to
-         take actions.
-
-         postCommit - fired after every commit or optimize command
-         postOptimize - fired after every optimize command
-      -->
-
-  </updateHandler>
-
-  <!-- IndexReaderFactory
-
-       Use the following format to specify a custom IndexReaderFactory,
-       which allows for alternate IndexReader implementations.
-
-       ** Experimental Feature **
-
-       Please note - Using a custom IndexReaderFactory may prevent
-       certain other features from working. The API to
-       IndexReaderFactory may change without warning or may even be
-       removed from future releases if the problems cannot be
-       resolved.
-
-
-       ** Features that may not work with custom IndexReaderFactory **
-
-       The ReplicationHandler assumes a disk-resident index. Using a
-       custom IndexReader implementation may cause incompatibility
-       with ReplicationHandler and may cause replication to not work
-       correctly. See SOLR-1366 for details.
-
-    -->
-  <!--
-  <indexReaderFactory name="IndexReaderFactory" class="package.class">
-    <str name="someArg">Some Value</str>
-  </indexReaderFactory >
-  -->
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Query section - these settings control query time things like caches
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <query>
-    <!-- Max Boolean Clauses
-
-         Maximum number of clauses in each BooleanQuery,  an exception
-         is thrown if exceeded.
-
-         ** WARNING **
-
-         This option actually modifies a global Lucene property that
-         will affect all SolrCores.  If multiple solrconfig.xml files
-         disagree on this property, the value at any given moment will
-         be based on the last SolrCore to be initialized.
-
-      -->
-    <maxBooleanClauses>${solr.max.booleanClauses:1024}</maxBooleanClauses>
-
-
-    <!-- Solr Internal Query Caches
-         Starting with Solr 9.0 the default cache implementation used is CaffeineCache.
-    -->
-
-    <!-- Filter Cache
-
-         Cache used by SolrIndexSearcher for filters (DocSets),
-         unordered sets of *all* documents that match a query.  When a
-         new searcher is opened, its caches may be prepopulated or
-         "autowarmed" using data from caches in the old searcher.
-         autowarmCount is the number of items to prepopulate.
-
-         Parameters:
-           class - the SolrCache implementation (CaffeineCache by default)
-           size - the maximum number of entries in the cache
-           initialSize - the initial capacity (number of entries) of
-               the cache.  (see java.util.HashMap)
-           autowarmCount - the number of entries to prepopulate from
-               and old cache.
-      -->
-    <filterCache class="solr.CaffeineCache"
-                 size="512"
-                 initialSize="512"
-                 autowarmCount="0"/>
-
-    <!-- Query Result Cache
-
-         Caches results of searches - ordered lists of document ids
-         (DocList) based on a query, a sort, and the range of documents requested.
-         Additional supported parameter by CaffeineCache:
-            maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
-                       to occupy
-      -->
-    <queryResultCache class="solr.CaffeineCache"
-                      size="512"
-                      initialSize="512"
-                      autowarmCount="0"/>
-
-    <!-- Document Cache
-
-         Caches Lucene Document objects (the stored fields for each
-         document).  Since Lucene internal document ids are transient,
-         this cache will not be autowarmed.
-      -->
-    <documentCache class="solr.CaffeineCache"
-                   size="512"
-                   initialSize="512"
-                   autowarmCount="0"/>
-
-    <!-- Field Value Cache
-
-         Cache used to hold field values that are quickly accessible
-         by document id.  The fieldValueCache is created by default
-         even if not configured here.
-      -->
-    <!--
-       <fieldValueCache class="solr.CaffeineCache"
-                        size="512"
-                        autowarmCount="128"
-                        showItems="32" />
-      -->
-
-    <!-- Custom Cache
-
-         Example of a generic cache.  These caches may be accessed by
-         name through SolrIndexSearcher.getCache(),cacheLookup(), and
-         cacheInsert().  The purpose is to enable easy caching of
-         user/application level data.  The regenerator argument should
-         be specified as an implementation of solr.CacheRegenerator
-         if autowarming is desired.
-      -->
-    <!--
-       <cache name="myUserCache"
-              class="solr.CaffeineCache"
-              size="4096"
-              initialSize="1024"
-              autowarmCount="1024"
-              regenerator="com.mycompany.MyRegenerator"
-              />
-      -->
-
-
-    <!-- Lazy Field Loading
-
-         If true, stored fields that are not requested will be loaded
-         lazily.  This can result in a significant speed improvement
-         if the usual case is to not load all stored fields,
-         especially if the skipped fields are large compressed text
-         fields.
-    -->
-    <enableLazyFieldLoading>true</enableLazyFieldLoading>
-
-    <!-- Use Filter For Sorted Query
-
-         A possible optimization that attempts to use a filter to
-         satisfy a search.  If the requested sort does not include
-         score, then the filterCache will be checked for a filter
-         matching the query. If found, the filter will be used as the
-         source of document ids, and then the sort will be applied to
-         that.
-
-         For most situations, this will not be useful unless you
-         frequently get the same search repeatedly with different sort
-         options, and none of them ever use "score"
-      -->
-    <!--
-       <useFilterForSortedQuery>true</useFilterForSortedQuery>
-      -->
-
-    <!-- Result Window Size
-
-         An optimization for use with the queryResultCache.  When a search
-         is requested, a superset of the requested number of document ids
-         are collected.  For example, if a search for a particular query
-         requests matching documents 10 through 19, and queryWindowSize is 50,
-         then documents 0 through 49 will be collected and cached.  Any further
-         requests in that range can be satisfied via the cache.
-      -->
-    <queryResultWindowSize>20</queryResultWindowSize>
-
-    <!-- Maximum number of documents to cache for any entry in the
-         queryResultCache.
-      -->
-    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
-
-    <!-- Query Related Event Listeners
-
-         Various IndexSearcher related events can trigger Listeners to
-         take actions.
-
-         newSearcher - fired whenever a new searcher is being prepared
-         and there is a current searcher handling requests (aka
-         registered).  It can be used to prime certain caches to
-         prevent long request times for certain requests.
-
-         firstSearcher - fired whenever a new searcher is being
-         prepared but there is no current registered searcher to handle
-         requests or to gain autowarming data from.
-
-
-      -->
-    <!-- QuerySenderListener takes an array of NamedList and executes a
-         local query request for each NamedList in sequence.
-      -->
-    <listener event="newSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <!--
-           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
-           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
-          -->
-      </arr>
-    </listener>
-    <listener event="firstSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <!--
-        <lst>
-          <str name="q">static firstSearcher warming in solrconfig.xml</str>
-        </lst>
-        -->
-      </arr>
-    </listener>
-
-    <!-- Use Cold Searcher
-
-         If a search request comes in and there is no current
-         registered searcher, then immediately register the still
-         warming searcher and use it.  If "false" then all requests
-         will block until the first searcher is done warming.
-      -->
-    <useColdSearcher>false</useColdSearcher>
-
-  </query>
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-     Circuit Breaker Section - This section consists of configurations for
-     circuit breakers
-     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <circuitBreaker class="solr.CircuitBreakerManager" enabled="true">
-    <!-- Enable Circuit Breakers
-
-    Circuit breakers are designed to allow stability and predictable query
-    execution. They prevent operations that can take down the node and cause
-    noisy neighbour issues.
-
-    This flag is the uber control switch which controls the activation/deactivation of all circuit
-    breakers. At the moment, the only circuit breaker (max JVM circuit breaker) does not have its
-    own specific configuration. However, if a circuit breaker wishes to be independently configurable,
-    they are free to add their specific configuration but need to ensure that this flag is always
-    respected - this should have veto over all independent configuration flags.
-    -->
-
-    <!-- Memory Circuit Breaker Control Flag
-
-    Use the following flag to control the behaviour of this circuit breaker
-    -->
-    <str name="memEnabled">true</str>
-
-    <!-- Memory Circuit Breaker Threshold In Percentage
-
-    Specific configuration for max JVM heap usage circuit breaker. This configuration defines the
-    threshold percentage of maximum heap allocated beyond which queries will be rejected until the
-    current JVM usage goes below the threshold. The valid value range for this parameter is 50 - 95.
-
-    Consider a scenario where the max heap allocated is 4 GB and memoryCircuitBreakerThresholdPct is
-    defined as 75. Threshold JVM usage will be 4 * 0.75 = 3 GB. Its generally a good idea to keep this value between 75 - 80% of maximum heap
-    allocated.
-
-    If, at any point, the current JVM heap usage goes above 3 GB, queries will be rejected until the heap usage goes below 3 GB again.
-    If you see queries getting rejected with 503 error code, check for "Circuit Breakers tripped"
-    in logs and the corresponding error message should tell you what transpired (if the failure
-    was caused by tripped circuit breakers).
-    -->
-    <str name="memThreshold">75</str>
-
-    <!-- CPU Based Circuit Breaker Control Flag
-
-    Use the following flag to control the behaviour of this circuit breaker
-    -->
-    <str name="cpuEnabled">true</str>
-
-    <!-- CPU Based Circuit Breaker Triggering Threshold
-
-    The triggering threshold is defined in units of CPU utilization. The configuration to control this is as below:
-    -->
-    <str name="cpuThreshold">75</str>
-
-
-  </circuitBreaker>
-
-
-  <!-- Request Dispatcher
-
-       This section contains instructions for how the SolrDispatchFilter
-       should behave when processing requests for this SolrCore.
-    -->
-  <requestDispatcher>
-    <!-- Request Parsing
-
-         These settings indicate how Solr Requests may be parsed, and
-         what restrictions may be placed on the ContentStreams from
-         those requests
-
-         enableRemoteStreaming - enables use of the stream.file
-         and stream.url parameters for specifying remote streams.
-
-         multipartUploadLimitInKB - specifies the max size (in KiB) of
-         Multipart File Uploads that Solr will allow in a Request.
-
-         formdataUploadLimitInKB - specifies the max size (in KiB) of
-         form data (application/x-www-form-urlencoded) sent via
-         POST. You can use POST to pass request parameters not
-         fitting into the URL.
-
-         addHttpRequestToContext - if set to true, it will instruct
-         the requestParsers to include the original HttpServletRequest
-         object in the context map of the SolrQueryRequest under the
-         key "httpRequest". It will not be used by any of the existing
-         Solr components, but may be useful when developing custom
-         plugins.
-
-         *** WARNING ***
-         Before enabling remote streaming, you should make sure your
-         system has authentication enabled.
-
-    <requestParsers enableRemoteStreaming="false"
-                    multipartUploadLimitInKB="-1"
-                    formdataUploadLimitInKB="-1"
-                    addHttpRequestToContext="false"/>
-      -->
-
-    <!-- HTTP Caching
-
-         Set HTTP caching related parameters (for proxy caches and clients).
-
-         The options below instruct Solr not to output any HTTP Caching
-         related headers
-      -->
-    <httpCaching never304="true" />
-    <!-- If you include a <cacheControl> directive, it will be used to
-         generate a Cache-Control header (as well as an Expires header
-         if the value contains "max-age=")
-
-         By default, no Cache-Control header is generated.
-
-         You can use the <cacheControl> option even if you have set
-         never304="true"
-      -->
-    <!--
-       <httpCaching never304="true" >
-         <cacheControl>max-age=30, public</cacheControl>
-       </httpCaching>
-      -->
-    <!-- To enable Solr to respond with automatically generated HTTP
-         Caching headers, and to response to Cache Validation requests
-         correctly, set the value of never304="false"
-
-         This will cause Solr to generate Last-Modified and ETag
-         headers based on the properties of the Index.
-
-         The following options can also be specified to affect the
-         values of these headers...
-
-         lastModFrom - the default value is "openTime" which means the
-         Last-Modified value (and validation against If-Modified-Since
-         requests) will all be relative to when the current Searcher
-         was opened.  You can change it to lastModFrom="dirLastMod" if
-         you want the value to exactly correspond to when the physical
-         index was last modified.
-
-         etagSeed="..." is an option you can change to force the ETag
-         header (and validation against If-None-Match requests) to be
-         different even if the index has not changed (ie: when making
-         significant changes to your config file)
-
-         (lastModifiedFrom and etagSeed are both ignored if you use
-         the never304="true" option)
-      -->
-    <!--
-       <httpCaching lastModifiedFrom="openTime"
-                    etagSeed="Solr">
-         <cacheControl>max-age=30, public</cacheControl>
-       </httpCaching>
-      -->
-  </requestDispatcher>
-
-  <!-- Request Handlers
-
-       http://wiki.apache.org/solr/SolrRequestHandler
-
-       Incoming queries will be dispatched to a specific handler by name
-       based on the path specified in the request.
-
-       If a Request Handler is declared with startup="lazy", then it will
-       not be initialized until the first request that uses it.
-
-    -->
-  <!-- SearchHandler
-
-       http://wiki.apache.org/solr/SearchHandler
-
-       For processing Search Queries, the primary Request Handler
-       provided with Solr is "SearchHandler" It delegates to a sequent
-       of SearchComponents (see below) and supports distributed
-       queries across multiple shards
-    -->
-  <requestHandler name="/select" class="solr.SearchHandler">
-    <!-- default values for query parameters can be specified, these
-         will be overridden by parameters in the request
-      -->
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-      <int name="rows">10</int>
-      <!-- Default search field
-         <str name="df">text</str> 
-        -->
-      <!-- Change from JSON to XML format (the default prior to Solr 7.0)
-         <str name="wt">xml</str> 
-        -->
-    </lst>
-    <!-- In addition to defaults, "appends" params can be specified
-         to identify values which should be appended to the list of
-         multi-val params from the query (or the existing "defaults").
-      -->
-    <!-- In this example, the param "fq=instock:true" would be appended to
-         any query time fq params the user may specify, as a mechanism for
-         partitioning the index, independent of any user selected filtering
-         that may also be desired (perhaps as a result of faceted searching).
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "appends" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="appends">
-         <str name="fq">inStock:true</str>
-       </lst>
-      -->
-    <!-- "invariants" are a way of letting the Solr maintainer lock down
-         the options available to Solr clients.  Any params values
-         specified here are used regardless of what values may be specified
-         in either the query, the "defaults", or the "appends" params.
-
-         In this example, the facet.field and facet.query params would
-         be fixed, limiting the facets clients can use.  Faceting is
-         not turned on by default - but if the client does specify
-         facet=true in the request, these are the only facets they
-         will be able to see counts for; regardless of what other
-         facet.field or facet.query params they may specify.
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "invariants" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="invariants">
-         <str name="facet.field">cat</str>
-         <str name="facet.field">manu_exact</str>
-         <str name="facet.query">price:[* TO 500]</str>
-         <str name="facet.query">price:[500 TO *]</str>
-       </lst>
-      -->
-    <!-- If the default list of SearchComponents is not desired, that
-         list can either be overridden completely, or components can be
-         prepended or appended to the default list.  (see below)
-      -->
-    <!--
-       <arr name="components">
-         <str>nameOfCustomComponent1</str>
-         <str>nameOfCustomComponent2</str>
-       </arr>
-      -->
-  </requestHandler>
-
-  <!-- A request handler that returns indented JSON by default -->
-  <requestHandler name="/query" class="solr.SearchHandler">
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-      <str name="wt">json</str>
-      <str name="indent">true</str>
-    </lst>
-  </requestHandler>
-
-
-  <!--These useParams values are available in params.json-->
-  <requestHandler name="/browse" class="solr.SearchHandler" useParams="query,facets,velocity,browse"/>
-
-
-  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
-    <lst name="defaults">
-      <str name="df">_text_</str>
-    </lst>
-  </initParams>
-
-  <initParams path="/update/**">
-    <lst name="defaults">
-      <str name="update.chain">files-update-processor</str>
-    </lst>
-  </initParams>
-
-  <!-- Solr Cell Update Request Handler
-
-       http://wiki.apache.org/solr/ExtractingRequestHandler
-
-    -->
-  <requestHandler name="/update/extract"
-                  startup="lazy"
-                  class="solr.extraction.ExtractingRequestHandler" >
-    <lst name="defaults">
-      <str name="xpath">/xhtml:html/xhtml:body/descendant:node()</str>
-      <str name="capture">content</str>
-      <str name="fmap.meta">attr_meta_</str>
-      <str name="uprefix">attr_</str>
-      <str name="lowernames">true</str>
-    </lst>
-  </requestHandler>
-  <!-- Search Components
-
-       Search components are registered to SolrCore and used by
-       instances of SearchHandler (which can access them by name)
-
-       By default, the following components are available:
-
-       <searchComponent name="query"     class="solr.QueryComponent" />
-       <searchComponent name="facet"     class="solr.FacetComponent" />
-       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
-       <searchComponent name="highlight" class="solr.HighlightComponent" />
-       <searchComponent name="stats"     class="solr.StatsComponent" />
-       <searchComponent name="debug"     class="solr.DebugComponent" />
-
-       Default configuration in a requestHandler would look like:
-
-       <arr name="components">
-         <str>query</str>
-         <str>facet</str>
-         <str>mlt</str>
-         <str>highlight</str>
-         <str>stats</str>
-         <str>debug</str>
-       </arr>
-
-       If you register a searchComponent to one of the standard names,
-       that will be used instead of the default.
-
-       To insert components before or after the 'standard' components, use:
-
-       <arr name="first-components">
-         <str>myFirstComponentName</str>
-       </arr>
-
-       <arr name="last-components">
-         <str>myLastComponentName</str>
-       </arr>
-
-       NOTE: The component registered with the name "debug" will
-       always be executed after the "last-components"
-
-     -->
-
-  <!-- Spell Check
-
-       The spell check component can return a list of alternative spelling
-       suggestions.
-
-       http://wiki.apache.org/solr/SpellCheckComponent
-    -->
-  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
-
-    <str name="queryAnalyzerFieldType">text_general</str>
-
-    <!-- Multiple "Spell Checkers" can be declared and used by this
-         component
-      -->
-
-    <!-- a spellchecker built from a field of the main index -->
-    <lst name="spellchecker">
-      <str name="name">default</str>
-      <str name="field">text</str>
-      <str name="classname">solr.DirectSolrSpellChecker</str>
-      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
-      <str name="distanceMeasure">internal</str>
-      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
-      <float name="accuracy">0.5</float>
-      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
-      <int name="maxEdits">2</int>
-      <!-- the minimum shared prefix when enumerating terms -->
-      <int name="minPrefix">1</int>
-      <!-- maximum number of inspections per result. -->
-      <int name="maxInspections">5</int>
-      <!-- minimum length of a query term to be considered for correction -->
-      <int name="minQueryLength">4</int>
-      <!-- maximum threshold of documents a query term can appear to be considered for correction -->
-      <float name="maxQueryFrequency">0.01</float>
-      <!-- uncomment this to require suggestions to occur in 1% of the documents
-        <float name="thresholdTokenFrequency">.01</float>
-      -->
-    </lst>
-
-    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
-    <lst name="spellchecker">
-      <str name="name">wordbreak</str>
-      <str name="classname">solr.WordBreakSolrSpellChecker</str>
-      <str name="field">name</str>
-      <str name="combineWords">true</str>
-      <str name="breakWords">true</str>
-      <int name="maxChanges">10</int>
-    </lst>
-
-    <!-- a spellchecker that uses a different distance measure -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">jarowinkler</str>
-         <str name="field">spell</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="distanceMeasure">
-           org.apache.lucene.search.spell.JaroWinklerDistance
-         </str>
-       </lst>
-     -->
-
-    <!-- a spellchecker that use an alternate comparator
-
-         comparatorClass be one of:
-          1. score (default)
-          2. freq (Frequency first, then score)
-          3. A fully qualified class name
-      -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">freq</str>
-         <str name="field">lowerfilt</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="comparatorClass">freq</str>
-      -->
-
-    <!-- A spellchecker that reads the list of words from a file -->
-    <!--
-       <lst name="spellchecker">
-         <str name="classname">solr.FileBasedSpellChecker</str>
-         <str name="name">file</str>
-         <str name="sourceLocation">spellings.txt</str>
-         <str name="characterEncoding">UTF-8</str>
-         <str name="spellcheckIndexDir">spellcheckerFile</str>
-       </lst>
-      -->
-  </searchComponent>
-
-  <!-- A request handler for demonstrating the spellcheck component.
-
-       NOTE: This is purely as an example.  The whole purpose of the
-       SpellCheckComponent is to hook it into the request handler that
-       handles your normal user queries so that a separate request is
-       not needed to get suggestions.
-
-       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS
-       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
-
-       See http://wiki.apache.org/solr/SpellCheckComponent for details
-       on the request parameters.
-    -->
-  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <!-- Solr will use suggestions from both the 'default' spellchecker
-           and from the 'wordbreak' spellchecker and combine them.
-           collations (re-written queries) can include a combination of
-           corrections from both spellcheckers -->
-      <str name="spellcheck.dictionary">default</str>
-      <str name="spellcheck.dictionary">wordbreak</str>
-      <str name="spellcheck">on</str>
-      <str name="spellcheck.extendedResults">true</str>
-      <str name="spellcheck.count">10</str>
-      <str name="spellcheck.alternativeTermCount">5</str>
-      <str name="spellcheck.maxResultsForSuggest">5</str>
-      <str name="spellcheck.collate">true</str>
-      <str name="spellcheck.collateExtendedResults">true</str>
-      <str name="spellcheck.maxCollationTries">10</str>
-      <str name="spellcheck.maxCollations">5</str>
-    </lst>
-    <arr name="last-components">
-      <str>spellcheck</str>
-    </arr>
-  </requestHandler>
-
-  <!-- Term Vector Component
-
-       http://wiki.apache.org/solr/TermVectorComponent
-    -->
-  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
-
-  <!-- A request handler for demonstrating the term vector component
-
-       This is purely as an example.
-
-       In reality you will likely want to add the component to your
-       already specified request handlers.
-    -->
-  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <bool name="tv">true</bool>
-    </lst>
-    <arr name="last-components">
-      <str>tvComponent</str>
-    </arr>
-  </requestHandler>
-
-  <!-- Clustering Component. (Omitted here. See the default Solr example for a typical configuration.) -->
-
-  <!-- Terms Component
-
-       http://wiki.apache.org/solr/TermsComponent
-
-       A component to return terms and document frequency of those
-       terms
-    -->
-  <searchComponent name="terms" class="solr.TermsComponent"/>
-
-  <!-- A request handler for demonstrating the terms component -->
-  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <bool name="terms">true</bool>
-      <bool name="distrib">false</bool>
-    </lst>
-    <arr name="components">
-      <str>terms</str>
-    </arr>
-  </requestHandler>
-
-
-  <!-- Query Elevation Component
-
-       http://wiki.apache.org/solr/QueryElevationComponent
-
-       a search component that enables you to configure the top
-       results for a given query regardless of the normal lucene
-       scoring.
-    -->
-  <searchComponent name="elevator" class="solr.QueryElevationComponent" >
-    <!-- pick a fieldType to analyze queries -->
-    <str name="queryFieldType">string</str>
-    <str name="config-file">elevate.xml</str>
-  </searchComponent>
-
-  <!-- A request handler for demonstrating the elevator component -->
-  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-    </lst>
-    <arr name="last-components">
-      <str>elevator</str>
-    </arr>
-  </requestHandler>
-
-  <!-- Highlighting Component
-
-       http://wiki.apache.org/solr/HighlightingParameters
-    -->
-  <searchComponent class="solr.HighlightComponent" name="highlight">
-    <highlighting>
-      <!-- Configure the standard fragmenter -->
-      <!-- This could most likely be commented out in the "default" case -->
-      <fragmenter name="gap"
-                  default="true"
-                  class="solr.highlight.GapFragmenter">
-        <lst name="defaults">
-          <int name="hl.fragsize">100</int>
-        </lst>
-      </fragmenter>
-
-      <!-- A regular-expression-based fragmenter
-           (for sentence extraction)
-        -->
-      <fragmenter name="regex"
-                  class="solr.highlight.RegexFragmenter">
-        <lst name="defaults">
-          <!-- slightly smaller fragsizes work better because of slop -->
-          <int name="hl.fragsize">70</int>
-          <!-- allow 50% slop on fragment sizes -->
-          <float name="hl.regex.slop">0.5</float>
-          <!-- a basic sentence pattern -->
-          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
-        </lst>
-      </fragmenter>
-
-      <!-- Configure the standard formatter -->
-      <formatter name="html"
-                 default="true"
-                 class="solr.highlight.HtmlFormatter">
-        <lst name="defaults">
-          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
-          <str name="hl.simple.post"><![CDATA[</em>]]></str>
-        </lst>
-      </formatter>
-
-      <!-- Configure the standard encoder -->
-      <encoder name="html"
-               class="solr.highlight.HtmlEncoder" />
-
-      <!-- Configure the standard fragListBuilder -->
-      <fragListBuilder name="simple"
-                       class="solr.highlight.SimpleFragListBuilder"/>
-
-      <!-- Configure the single fragListBuilder -->
-      <fragListBuilder name="single"
-                       class="solr.highlight.SingleFragListBuilder"/>
-
-      <!-- Configure the weighted fragListBuilder -->
-      <fragListBuilder name="weighted"
-                       default="true"
-                       class="solr.highlight.WeightedFragListBuilder"/>
-
-      <!-- default tag FragmentsBuilder -->
-      <fragmentsBuilder name="default"
-                        default="true"
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <!--
-        <lst name="defaults">
-          <str name="hl.multiValuedSeparatorChar">/</str>
-        </lst>
-        -->
-      </fragmentsBuilder>
-
-      <!-- multi-colored tag FragmentsBuilder -->
-      <fragmentsBuilder name="colored"
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <lst name="defaults">
-          <str name="hl.tag.pre"><![CDATA[
-               <b style="background:yellow">,<b style="background:lawgreen">,
-               <b style="background:aquamarine">,<b style="background:magenta">,
-               <b style="background:palegreen">,<b style="background:coral">,
-               <b style="background:wheat">,<b style="background:khaki">,
-               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
-          <str name="hl.tag.post"><![CDATA[</b>]]></str>
-        </lst>
-      </fragmentsBuilder>
-
-      <boundaryScanner name="default"
-                       default="true"
-                       class="solr.highlight.SimpleBoundaryScanner">
-        <lst name="defaults">
-          <str name="hl.bs.maxScan">10</str>
-          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
-        </lst>
-      </boundaryScanner>
-
-      <boundaryScanner name="breakIterator"
-                       class="solr.highlight.BreakIteratorBoundaryScanner">
-        <lst name="defaults">
-          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
-          <str name="hl.bs.type">WORD</str>
-          <!-- language and country are used when constructing Locale object.  -->
-          <!-- And the Locale object will be used when getting instance of BreakIterator -->
-          <str name="hl.bs.language">en</str>
-          <str name="hl.bs.country">US</str>
-        </lst>
-      </boundaryScanner>
-    </highlighting>
-  </searchComponent>
-
-  <!-- Update Processors
-
-       Chains of Update Processor Factories for dealing with Update
-       Requests can be declared, and then used by name in Update
-       Request Processors
-
-       http://wiki.apache.org/solr/UpdateRequestProcessor
-
-    -->
-
-  <!-- Add unknown fields to the schema
-
-       An example field type guessing update processor that will
-       attempt to parse string-typed field values as Booleans, Longs,
-       Doubles, or Dates, and then add schema fields with the guessed
-       field types.
-
-       This requires that the schema is both managed and mutable, by
-       declaring schemaFactory as ManagedIndexSchemaFactory, with
-       mutable specified as true.
-
-       See http://wiki.apache.org/solr/GuessingFieldTypes
-    -->
-  <updateRequestProcessorChain name="files-update-processor">
-    <!-- UUIDUpdateProcessorFactory will generate an id if none is present in the incoming document -->
-    <processor class="solr.UUIDUpdateProcessorFactory" />
-    <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
-    <processor class="solr.FieldNameMutatingUpdateProcessorFactory">
-      <str name="pattern">[^\w-\.]</str>
-      <str name="replacement">_</str>
-    </processor>
-    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
-    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
-    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
-    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
-      <arr name="format">
-        <str>yyyy-MM-dd['T'[HH:mm[:ss[.SSS]][z</str>
-        <str>yyyy-MM-dd['T'[HH:mm[:ss[,SSS]][z</str>
-        <str>yyyy-MM-dd HH:mm[:ss[.SSS]][z</str>
-        <str>yyyy-MM-dd HH:mm[:ss[,SSS]][z</str>
-        <str>[EEE, ]dd MMM yyyy HH:mm[:ss] z</str>
-        <str>EEEE, dd-MMM-yy HH:mm:ss z</str>
-        <str>EEE MMM ppd HH:mm:ss [z ]yyyy</str>
-      </arr>
-    </processor>
-    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
-      <str name="defaultFieldType">strings</str>
-      <lst name="typeMapping">
-        <str name="valueClass">java.lang.Boolean</str>
-        <str name="fieldType">booleans</str>
-      </lst>
-      <lst name="typeMapping">
-        <str name="valueClass">java.util.Date</str>
-        <str name="fieldType">pdates</str>
-      </lst>
-      <lst name="typeMapping">
-        <str name="valueClass">java.lang.Long</str>
-        <str name="valueClass">java.lang.Integer</str>
-        <str name="fieldType">plongs</str>
-      </lst>
-      <lst name="typeMapping">
-        <str name="valueClass">java.lang.Number</str>
-        <str name="fieldType">pdoubles</str>
-      </lst>
-    </processor>
-
-
-    <processor class="org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactory">
-      <lst name="defaults">
-        <str name="langid.fl">content</str>
-        <str name="langid.langField">language</str>
-      </lst>
-    </processor>
-
-    <processor class="solr.StatelessScriptUpdateProcessorFactory">
-      <str name="script">update-script.js</str>
-      <!--<lst name="params">-->
-        <!--<str name="config_param">example config parameter</str>-->
-      <!--</lst>-->
-    </processor>
-
-    <processor class="solr.LogUpdateProcessorFactory"/>
-    <processor class="solr.DistributedUpdateProcessorFactory"/>
-    <processor class="solr.RunUpdateProcessorFactory"/>
-  </updateRequestProcessorChain>
-
-  <!-- Deduplication
-
-       An example dedup update processor that creates the "id" field
-       on the fly based on the hash code of some other fields.  This
-       example has overwriteDupes set to false since we are using the
-       id field as the signatureField and Solr will maintain
-       uniqueness based on that anyway.
-
-    -->
-  <!--
-     <updateRequestProcessorChain name="dedupe">
-       <processor class="solr.processor.SignatureUpdateProcessorFactory">
-         <bool name="enabled">true</bool>
-         <str name="signatureField">id</str>
-         <bool name="overwriteDupes">false</bool>
-         <str name="fields">name,features,cat</str>
-         <str name="signatureClass">solr.processor.Lookup3Signature</str>
-       </processor>
-       <processor class="solr.LogUpdateProcessorFactory" />
-       <processor class="solr.RunUpdateProcessorFactory" />
-     </updateRequestProcessorChain>
-    -->
-
-  <!-- Language identification
-
-       This example update chain identifies the language of the incoming
-       documents using the langid contrib. The detected language is
-       written to field language_s. No field name mapping is done.
-       The fields used for detection are text, title, subject and description,
-       making this example suitable for detecting languages form full-text
-       rich documents injected via ExtractingRequestHandler.
-       See more about langId at http://wiki.apache.org/solr/LanguageDetection
-    -->
-  <!--
-   <updateRequestProcessorChain name="langid">
-     <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
-       <str name="langid.fl">text,title,subject,description</str>
-       <str name="langid.langField">language_s</str>
-       <str name="langid.fallback">en</str>
-     </processor>
-     <processor class="solr.LogUpdateProcessorFactory" />
-     <processor class="solr.RunUpdateProcessorFactory" />
-   </updateRequestProcessorChain>
-  -->
-
-  <!-- Script update processor
-
-    This example hooks in an update processor implemented using JavaScript.
-
-    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
-  -->
-  <!--
-    <updateRequestProcessorChain name="script">
-      <processor class="solr.StatelessScriptUpdateProcessorFactory">
-        <str name="script">update-script.js</str>
-        <lst name="params">
-          <str name="config_param">example config parameter</str>
-        </lst>
-      </processor>
-      <processor class="solr.RunUpdateProcessorFactory" />
-    </updateRequestProcessorChain>
-  -->
-
-  <!-- Response Writers
-
-       http://wiki.apache.org/solr/QueryResponseWriter
-
-       Request responses will be written using the writer specified by
-       the 'wt' request parameter matching the name of a registered
-       writer.
-
-       The "default" writer is the default and will be used if 'wt' is
-       not specified in the request.
-    -->
-  <!-- The following response writers are implicitly configured unless
-       overridden...
-    -->
-  <!--
-     <queryResponseWriter name="xml"
-                          default="true"
-                          class="solr.XMLResponseWriter" />
-     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
-     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
-     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
-     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
-     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
-     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
-     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
-    -->
-
-  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
-    <!-- For the purposes of the tutorial, JSON responses are written as
-     plain text so that they are easy to read in *any* browser.
-     If you expect a MIME type of "application/json" just remove this override.
-    -->
-    <str name="content-type">text/plain; charset=UTF-8</str>
-  </queryResponseWriter>
-
-  <!--
-     Custom response writers can be declared as needed...
-    -->
-  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
-    <str name="template.base.dir">${velocity.template.base.dir:}</str>
-  </queryResponseWriter>
-
-  <!-- XSLT response writer transforms the XML output by any xslt file found
-       in Solr's conf/xslt directory.  Changes to xslt files are checked for
-       every xsltCacheLifetimeSeconds.
-    -->
-  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
-    <int name="xsltCacheLifetimeSeconds">5</int>
-  </queryResponseWriter>
-
-  <!-- Query Parsers
-
-       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
-
-       Multiple QParserPlugins can be registered by name, and then
-       used in either the "defType" param for the QueryComponent (used
-       by SearchHandler) or in LocalParams
-    -->
-  <!-- example of registering a query parser -->
-  <!--
-     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
-    -->
-
-  <!-- Function Parsers
-
-       http://wiki.apache.org/solr/FunctionQuery
-
-       Multiple ValueSourceParsers can be registered by name, and then
-       used as function names when using the "func" QParser.
-    -->
-  <!-- example of registering a custom function parser  -->
-  <!--
-     <valueSourceParser name="myfunc"
-                        class="com.mycompany.MyValueSourceParser" />
-    -->
-
-
-  <!-- Document Transformers
-       http://wiki.apache.org/solr/DocTransformers
-    -->
-  <!--
-     Could be something like:
-     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
-       <int name="connection">jdbc://....</int>
-     </transformer>
-
-     To add a constant value to all docs, use:
-     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <int name="value">5</int>
-     </transformer>
-
-     If you want the user to still be able to change it with _value:something_ use this:
-     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <double name="defaultValue">5</double>
-     </transformer>
-
-      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
-      EditorialMarkerFactory will do exactly that:
-     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
-    -->
-
-</config>
diff --git a/solr/example/files/conf/stopwords.txt b/solr/example/files/conf/stopwords.txt
deleted file mode 100644
index ae1e83e..0000000
--- a/solr/example/files/conf/stopwords.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/solr/example/files/conf/synonyms.txt b/solr/example/files/conf/synonyms.txt
deleted file mode 100644
index eab4ee8..0000000
--- a/solr/example/files/conf/synonyms.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-#some test synonym mappings unlikely to appear in real input text
-aaafoo => aaabar
-bbbfoo => bbbfoo bbbbar
-cccfoo => cccbar cccbaz
-fooaaa,baraaa,bazaaa
-
-# Some synonym groups specific to this example
-GB,gib,gigabyte,gigabytes
-MB,mib,megabyte,megabytes
-Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
-#after us won't split it into two words.
-
-# Synonym mappings can be used for spelling correction too
-pixima => pixma
-
diff --git a/solr/example/files/conf/update-script.js b/solr/example/files/conf/update-script.js
deleted file mode 100644
index 2589968..0000000
--- a/solr/example/files/conf/update-script.js
+++ /dev/null
@@ -1,115 +0,0 @@
-function get_class(name) {
-  var clazz;
-  try {
-    // Java8 Nashorn
-    clazz = eval("Java.type(name).class");
-  } catch(e) {
-    // Java7 Rhino
-    clazz = eval("Packages."+name);
-  }
-
-  return clazz;
-}
-
-function processAdd(cmd) {
-
-  doc = cmd.solrDoc;  // org.apache.solr.common.SolrInputDocument
-  var id = doc.getFieldValue("id");
-  logger.info("update-script#processAdd: id=" + id);
-
-  // The idea here is to use the file's content_type value to
-  // simplify into user-friendly values, such that types of, say, image/jpeg and image/tiff
-  // are in an "Images" facet
-
-  var ct = doc.getFieldValue("content_type");
-  if (ct) {
-    // strip off semicolon onward
-    var semicolon_index = ct.indexOf(';');
-    if (semicolon_index != -1) {
-      ct = ct.substring(0,semicolon_index);
-    }
-    // and split type/subtype
-    var ct_type = ct.substring(0,ct.indexOf('/'));
-    var ct_subtype = ct.substring(ct.indexOf('/')+1);
-
-    var doc_type;
-    switch(true) {
-      case /^application\/rtf/.test(ct) || /wordprocessing/.test(ct):
-        doc_type = "doc";
-        break;
-
-      case /html/.test(ct):
-        doc_type = "html";
-        break;
-
-      case /^image\/.*/.test(ct):
-        doc_type = "image";
-        break;
-
-      case /presentation|powerpoint/.test(ct):
-        doc_type = "presentation";
-        break;
-
-      case /spreadsheet|excel/.test(ct):
-        doc_type = "spreadsheet";
-        break;
-
-      case /^application\/pdf/.test(ct):
-        doc_type = "pdf";
-        break;
-
-      case /^text\/plain/.test(ct):
-        doc_type = "text"
-        break;
-
-      default:
-        break;
-    }
-
-    // TODO: error handling needed?   What if there is no slash?
-    if(doc_type) { doc.setField("doc_type", doc_type); }
-    doc.setField("content_type_type_s", ct_type);
-    doc.setField("content_type_subtype_s", ct_subtype);
-  }
-
-  var content = doc.getFieldValue("content");
-  if (!content) {
-    return; //No content found, so we are done here
-  }
-
-    var analyzer =
-         req.getCore().getLatestSchema()
-         .getFieldTypeByName("text_email_url")
-         .getIndexAnalyzer();
-
-  var token_stream =
-       analyzer.tokenStream("content", content);
-  var term_att = token_stream.getAttribute(get_class("org.apache.lucene.analysis.tokenattributes.CharTermAttribute"));
-  var type_att = token_stream.getAttribute(get_class("org.apache.lucene.analysis.tokenattributes.TypeAttribute"));
-  token_stream.reset();
-  while (token_stream.incrementToken()) {
-    doc.addField(type_att.type().replace(/\<|\>/g,'').toLowerCase()+"_ss", term_att.toString());
-  }
-  token_stream.end();
-  token_stream.close();
-}
-
-function processDelete(cmd) {
-  // no-op
-}
-
-function processMergeIndexes(cmd) {
-  // no-op
-}
-
-function processCommit(cmd) {
-  // no-op
-}
-
-function processRollback(cmd) {
-  // no-op
-}
-
-function finish() {
-  // no-op
-}
diff --git a/solr/example/files/conf/velocity/browse.vm b/solr/example/files/conf/velocity/browse.vm
deleted file mode 100644
index 535a771..0000000
--- a/solr/example/files/conf/velocity/browse.vm
+++ /dev/null
@@ -1,32 +0,0 @@
-<div id="query-box">
-  <form id="query-form" action="#{url_for_home}" method="GET">
-    $resource.find:
-    <input type="text" id="q" name="q" style="width: 50%" value="$!esc.html($request.params.get('q'))"/>
-    <input type="submit" value="$resource.submit"/>
-    <div id="debug_query" class="debug">
-      <span id="parsed_query">$esc.html($response.response.debug.parsedquery)</span>
-    </div>
-
-    <input type="hidden" name="type" value="#current_type"/>
-    #if("#current_locale"!="")<input type="hidden" value="locale" value="#current_locale"/>#end
-    #foreach($fq in $response.responseHeader.params.getAll("fq"))
-      <input type="hidden" name="fq" id="allFQs" value="$esc.html($fq)"/>
-    #end
-  </form>
-
-  <div id="constraints">
-    #foreach($fq in $response.responseHeader.params.getAll("fq"))
-      #set($previous_fq_count=$velocityCount - 1)
-      #if($fq != '')
-      &gt; $fq<a href="#url_for_filters($response.responseHeader.params.fq.subList(0,$previous_fq_count))">x</a>
-      #end
-    #end
-  </div>
-
-</div>
-
-
-<div id="browse_results">
-  #parse("results.vm")
-</div>
-
diff --git a/solr/example/files/conf/velocity/dropit.js b/solr/example/files/conf/velocity/dropit.js
deleted file mode 100644
index e69de29..0000000
--- a/solr/example/files/conf/velocity/dropit.js
+++ /dev/null
diff --git a/solr/example/files/conf/velocity/facet_doc_type.vm b/solr/example/files/conf/velocity/facet_doc_type.vm
deleted file mode 100644
index ff47167..0000000
--- a/solr/example/files/conf/velocity/facet_doc_type.vm
+++ /dev/null
@@ -1,2 +0,0 @@
-## intentionally empty
-
diff --git a/solr/example/files/conf/velocity/facet_text_shingles.vm b/solr/example/files/conf/velocity/facet_text_shingles.vm
deleted file mode 100644
index ddd9693..0000000
--- a/solr/example/files/conf/velocity/facet_text_shingles.vm
+++ /dev/null
@@ -1,12 +0,0 @@
-<div id="facet_$field.name">
-  <span class="facet-field">$resource.facet.top_phrases</span><br/>
-
-  <ul id="tagcloud">
-    #foreach($facet in $sort.sort($field.values,"name"))
-    <li data-weight="$math.mul($facet.count,1)">
-      <a href="#url_for_facet_filter($field.name, $facet.name)">$facet.name</a>
-    </li>
-
-    #end
-  </ul>
-</div>
\ No newline at end of file
diff --git a/solr/example/files/conf/velocity/facets.vm b/solr/example/files/conf/velocity/facets.vm
deleted file mode 100644
index bb27b5c..0000000
--- a/solr/example/files/conf/velocity/facets.vm
+++ /dev/null
@@ -1,24 +0,0 @@
-#if($response.facetFields.size() > 0)
-  #foreach($field in $response.facetFields)
-    #if($field.values.size() > 0)
-        #if($engine.resourceExists("facet_${field.name}.vm"))
-          #parse("facet_${field.name}.vm")
-        #else
-          <div id="facet_$field.name" class="facet_field">
-            <span class="facet-field">#label("facet.${field.name}",$field.name)</span><br/>
-
-            <ul>
-              #foreach($facet in $field.values)
-                <li><a href="#url_for_facet_filter($field.name, $facet.name)">#if($facet.name!=$null)#label("${field.name}.${facet.name}","${field.name}.${facet.name}")#else<em>missing</em>#end</a> ($facet.count)</li>
-              #end
-            </ul>
-          </div>
-        #end
-    #end
-  #end ## end if field.values > 0
-#end  ## end if facetFields > 0
-
-
-
-
-
diff --git a/solr/example/files/conf/velocity/footer.vm b/solr/example/files/conf/velocity/footer.vm
deleted file mode 100644
index 6cb0096..0000000
--- a/solr/example/files/conf/velocity/footer.vm
+++ /dev/null
@@ -1,29 +0,0 @@
-<hr/>
-
-<div>
-
-  <div id="admin"><a href="#url_root/index.html#/#{core_name}">Solr Admin</a></div>
-
-  <a href="#" onclick='jQuery(".debug").toggle(); return false;'>toggle debug mode</a>
-  <a href="#url_for_lens&wt=xml#if($debug)&debug=true#end">XML results</a> ## TODO: Add links for other formats, maybe dynamically?
-
-</div>
-
-<div>
-  <a href="http://lucene.apache.org/solr">Solr Home Page</a>
-</div>
-
-
-<div class="debug">
-  <hr/>
-  Request:
-  <pre>
-    $esc.html($request)
-  </pre>
-
-  <hr/>
-  Debug:
-  <pre>
-    $esc.html($response.response.debug)
-  </pre>
-</div>
\ No newline at end of file
diff --git a/solr/example/files/conf/velocity/head.vm b/solr/example/files/conf/velocity/head.vm
deleted file mode 100644
index a7e9b08..0000000
--- a/solr/example/files/conf/velocity/head.vm
+++ /dev/null
@@ -1,290 +0,0 @@
-<title>Solr browse: #core_name</title>
-
-<meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
-
-<link rel="icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-<link rel="shortcut icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-
-<script type="text/javascript" src="#{url_root}/libs/jquery-3.4.1.min.js"></script>
-<script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/js/jquery.tx3-tag-cloud.js&contentType=text/javascript"></script>
-<script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/js/dropit.js&contentType=text/javascript"></script>
-<script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/js/jquery.autocomplete.js&contentType=text/javascript"></script>
-
-<script type="text/javascript">
-  $(document).ready(function() {
-
-    $("#tagcloud").tx3TagCloud({
-      multiplier: 1
-    });
-
-    $('.menu').dropit();
-
-    $( document ).ajaxComplete(function() {
-      $("#tagcloud").tx3TagCloud({
-        multiplier: 5
-      });
-    });
-
-    $('\#q').keyup(function() {
-      $('#browse_results').load('#{url_for_home}?#lensNoQ&v.layout.enabled=false&v.template=results&q='+encodeURI($('\#q').val()));
-
-      $("\#q").autocomplete('#{url_for_solr}/suggest', {
-        extraParams: {
-          'suggest.q': function() { return $("\#q").val();},
-          'suggest.build': 'true',
-          'wt': 'json',
-        }
-      }).keydown(function(e) {
-        if (e.keyCode === 13){
-          $("#query-form").trigger('submit');
-        }
-      });
-    });
-
-  });
-</script>
-
-<style>
-
-  html {
-    background-color: #F0F8FF;
-  }
-
-  body {
-    font-family: Helvetica, Arial, sans-serif;
-    font-size: 10pt;
-  }
-
-  #header {
-    width: 100%;
-    font-size: 20pt;
-  }
-
-  #header2 {
-    margin-left:1200px;
-  }
-
-  #logo {
-    width: 115px;
-    margin: 0px 0px 0px 0px;
-    border-style: none;
-  }
-
-  a {
-    color: #305CB3;
-  }
-
-  a.hidden {
-    display:none;
-  }
-
-  em {
-    color: #FF833D;
-  }
-
-  .error {
-    color: white;
-    background-color: red;
-    left: 210px;
-    width:80%;
-    position: relative;
-  }
-
-  .debug { display: none; font-size: 10pt}
-  #debug_query {
-    font-family: Helvetica, Arial, sans-serif;
-    font-size: 10pt;
-    font-weight: bold;
-  }
-  #parsed_query {
-    font-family: Courier, Courier New, monospaced;
-    font-size: 10pt;
-    font-weight: normal;
-  }
-
-  #admin {
-    text-align: right;
-    vertical-align: top;
-  }
-
-  #query-form {
-    width: 90%;
-  }
-
-  #query-box {
-    padding: 5px;
-    margin: 5px;
-    font-weight: normal;
-    font-size: 24px;
-    letter-spacing: 0.08em;
-  }
-  #constraints {
-    margin: 10px;
-  }
-
-  #tabs {  }
-  #tabs li { display: inline; font-size: 10px;}
-  #tabs li a { border-radius: 20px; border: 2px solid #C1CDCD; padding: 10px;color: #42454a; background-color: #dedbde;}
-  #tabs li a:hover { background-color: #f1f0ee; }
-  #tabs li a.selected { color: #000; background-color: #f1f0ee; font-weight: bold; padding: 5px }
-  #tabs li a.no_results { color: #000; background-color: #838B8B; font-style: italic; padding: 5px; pointer-events: none;
-  cursor: default; text-decoration: none;}
-
-  .pagination {
-    width: 305px;
-    border-radius: 25px;
-    border: 2px solid #C1CDCD;
-    padding: 20px;
-    padding-left: 10%;
-    background: #eee;
-    margin-left: 190px;
-    margin-top : 42px;
-    padding-top: 5px;
-    padding-bottom: 5px;
-    text-align:left;
-  }
-
-  #results_list { width: 70%; }
-  .result-document {
-    border-radius: 25px;
-    border: 2px solid #C1CDCD;
-    padding: 10px;
-//    width: 800px;
-//    height: 120px;
-    margin: 5px;
-//    margin-left: 60px;
-//    margin-right: 210px;
-//    margin-bottom: 15px;
-    transition: 1s ease;
-  }
-  .result-document:hover
-  {
-    webkit-transform: scale(1.1);
-    -ms-transform: scale(1.1);
-    transform: scale(1.1);
-    transition: 1s ease;
-  }
-  .result-document div {
-    padding: 5px;
-  }
-  .result-title {
-    width:60%;
-  }
-  .result-body {
-    background: #ddd;
-  }
-  .result-document:nth-child(2n+1) {
-    background-color: #FFFFFD;
-  }
-
-  #facets {
-    margin: 5px;
-    margin-top: 0px;
-    padding: 5px;
-    top: -20px;
-    position: relative;
-    float: right;
-    width: 25%;
-  }
-  .facet-field {
-    font-weight: bold;
-  }
-  #facets ul {
-    list-style: none;
-    margin: 0;
-    margin-bottom: 5px;
-    margin-top: 5px;
-    padding-left: 10px;
-  }
-  #facets ul li {
-    color: #999;
-    padding: 2px;
-  }
-
-  div.facet_field {
-    clear: left;
-  }
-
-  ul.tx3-tag-cloud { }
-  ul.tx3-tag-cloud li {
-    display: block;
-    float: left;
-    list-style: none;
-    margin-right: 4px;
-  }
-  ul.tx3-tag-cloud li a {
-    display: block;
-    text-decoration: none;
-    color: #c9c9c9;
-    padding: 3px 10px;
-  }
-  ul.tx3-tag-cloud li a:hover {
-    color: #000000;
-    -webkit-transition: color 250ms linear;
-    -moz-transition: color 250ms linear;
-    -o-transition: color 250ms linear;
-    -ms-transition: color 250ms linear;
-    transition: color 250ms linear;
-  }
-
-  .dropit {
-    list-style: none;
-    padding: 0;
-    margin: 0;
-  }
-  .dropit .dropit-trigger { position: relative; }
-  .dropit .dropit-submenu {
-    position: absolute;
-    top: 100%;
-    left: 0; /* dropdown left or right */
-    z-index: 1000;
-    display: none;
-    min-width: 150px;
-    list-style: none;
-    padding: 0;
-    margin: 0;
-  }
-  .dropit .dropit-open .dropit-submenu { display: block; }
-
-
-  <!--autocomplete css-->
-  .ac_results {
-    padding: 0px;
-    border: 1px solid black;
-    background-color: white;
-    overflow: hidden;
-    z-index: 99999;
-  }
-
-  .ac_results ul {
-    width: 100%;
-    list-style-position: outside;
-    list-style: none;
-    padding: 0;
-    margin: 0;
-  }
-
-  .ac_results li {
-    margin: 0px;
-    padding: 2px 5px;
-    cursor: default;
-    display: block;
-    font: menu;
-    font-size: 12px;
-    line-height: 16px;
-    overflow: hidden;
-  }
-
-  .ac_loading {
-//    background: white url('˜indicator.gif') right center no-repeat;
-  }
-
-  .ac_odd {
-    background-color: #eee;
-  }
-
-  .ac_over {
-    background-color: #0A246A;
-    color: white;
-  }
-</style>
diff --git a/solr/example/files/conf/velocity/hit.vm b/solr/example/files/conf/velocity/hit.vm
deleted file mode 100644
index 2c658cd..0000000
--- a/solr/example/files/conf/velocity/hit.vm
+++ /dev/null
@@ -1,77 +0,0 @@
-
-#set($docId = $doc.getFirstValue($request.schema.uniqueKeyField.name))
-
-## Load Mime-Type List and Mapping
-#parse('mime_type_lists.vm')
-
-## Title
-#if($doc.getFieldValue('title'))
-  #set($title = $esc.html($doc.getFirstValue('title')))
-#else
-  #set($title = "$doc.getFirstValue('id').substring($math.add(1,$doc.getFirstValue('id').lastIndexOf('/')))")
-#end
-
-## Date
-#if($doc.getFieldValue('attr_meta_creation_date'))
-  #set($date = $esc.html($doc.getFirstValue('attr_meta_creation_date')))
-#else
-  #set($date = "No date found")
-#end
-
-
-
-## URL
-#if($doc.getFieldValue('url'))
-  #set($url = $doc.getFieldValue('url'))
-#elseif($doc.getFieldValue('resourcename'))
-  #set($url = "file:///$doc.getFirstValue('resourcename')")
-#else
-  #set($url = "$doc.getFieldValue('id')")
-#end
-
-## Sort out Mime-Type
-#set($ct = $doc.getFirstValue('content_type').split(";").get(0))
-#set($filename = $doc.getFirstValue('resourcename'))
-#set($filetype = false)
-#set($filetype = $mimeExtensionsMap.get($ct))
-#if(!$filetype)
-  #set($filetype = $filename.substring($filename.lastIndexOf(".")).substring(1))
-#end
-#if(!$filetype)
-  #set($filetype = "file")
-#end
-#if(!$supportedMimeTypes.contains($filetype))
-  #set($filetype = "file")
-#end
-
-<div class="result-document">
-  <span class="result-title">
-    <img src="#{url_root}/img/filetypes/${filetype}.png" align="center">
-    <b>$title</b>
-  </span>
-
-  <div>
-    id: $docId </br>
-  </div>
-
-  #set($pad = "")
-  #foreach($v in $response.response.highlighting.get($docId).get("content"))
-    $pad$esc.html($v).replace("HL_START","<em>").replace("HL_END","</em>")
-    #set($pad = " ... ")
-  #end
-
-</div>
-
-<a href="#" class="debug" onclick='jQuery(this).next().toggle(); return false;'>toggle explain</a>
-<pre style="display: none;">
-    $esc.html($response.getExplainMap().get($doc.getFirstValue('id')))
-</pre>
-
-<a href="#" class="debug" onclick='jQuery(this).next().toggle(); return false;'>show all fields</a>
-<pre style="display:none;">
-  #foreach($fieldname in $doc.fieldNames)
-    <span>$fieldname :</span>
-    <span>#foreach($value in $doc.getFieldValues($fieldname))$esc.html($value)#end</span>
-  #end
-</pre>
-
diff --git a/solr/example/files/conf/velocity/img/english_640.png b/solr/example/files/conf/velocity/img/english_640.png
deleted file mode 100644
index 81256a1..0000000
--- a/solr/example/files/conf/velocity/img/english_640.png
+++ /dev/null
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/france_640.png b/solr/example/files/conf/velocity/img/france_640.png
deleted file mode 100644
index 16d4541..0000000
--- a/solr/example/files/conf/velocity/img/france_640.png
+++ /dev/null
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/germany_640.png b/solr/example/files/conf/velocity/img/germany_640.png
deleted file mode 100644
index f5d6ae8..0000000
--- a/solr/example/files/conf/velocity/img/germany_640.png
+++ /dev/null
Binary files differ
diff --git a/solr/example/files/conf/velocity/img/globe_256.png b/solr/example/files/conf/velocity/img/globe_256.png
deleted file mode 100644
index 514597b..0000000
--- a/solr/example/files/conf/velocity/img/globe_256.png
+++ /dev/null
Binary files differ
diff --git a/solr/example/files/conf/velocity/jquery.tx3-tag-cloud.js b/solr/example/files/conf/velocity/jquery.tx3-tag-cloud.js
deleted file mode 100644
index e69de29..0000000
--- a/solr/example/files/conf/velocity/jquery.tx3-tag-cloud.js
+++ /dev/null
diff --git a/solr/example/files/conf/velocity/js/dropit.js b/solr/example/files/conf/velocity/js/dropit.js
deleted file mode 100644
index 3094414..0000000
--- a/solr/example/files/conf/velocity/js/dropit.js
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Dropit v1.1.0
- * http://dev7studios.com/dropit
- *
- * Copyright 2012, Dev7studios
- * Free to use and abuse under the MIT license.
- * http://www.opensource.org/licenses/mit-license.php
- */
-
-;(function($) {
-
-    $.fn.dropit = function(method) {
-
-        var methods = {
-
-            init : function(options) {
-                this.dropit.settings = $.extend({}, this.dropit.defaults, options);
-                return this.each(function() {
-                    var $el = $(this),
-                         el = this,
-                         settings = $.fn.dropit.settings;
-
-                    // Hide initial submenus
-                    $el.addClass('dropit')
-                    .find('>'+ settings.triggerParentEl +':has('+ settings.submenuEl +')').addClass('dropit-trigger')
-                    .find(settings.submenuEl).addClass('dropit-submenu').hide();
-
-                    // Open on click
-                    $el.off(settings.action).on(settings.action, settings.triggerParentEl +':has('+ settings.submenuEl +') > '+ settings.triggerEl +'', function(){
-                        // Close click menu's if clicked again
-                        if(settings.action == 'click' && $(this).parents(settings.triggerParentEl).hasClass('dropit-open')){
-                            settings.beforeHide.call(this);
-                            $(this).parents(settings.triggerParentEl).removeClass('dropit-open').find(settings.submenuEl).hide();
-                            settings.afterHide.call(this);
-                            return false;
-                        }
-
-                        // Hide open menus
-                        settings.beforeHide.call(this);
-                        $('.dropit-open').removeClass('dropit-open').find('.dropit-submenu').hide();
-                        settings.afterHide.call(this);
-
-                        // Open this menu
-                        settings.beforeShow.call(this);
-                        $(this).parents(settings.triggerParentEl).addClass('dropit-open').find(settings.submenuEl).show();
-                        settings.afterShow.call(this);
-
-                        return false;
-                    });
-
-                    // Close if outside click
-                    $(document).on('click', function(){
-                        settings.beforeHide.call(this);
-                        $('.dropit-open').removeClass('dropit-open').find('.dropit-submenu').hide();
-                        settings.afterHide.call(this);
-                    });
-
-                    // If hover
-                    if(settings.action == 'mouseenter'){
-                        $el.on('mouseleave', '.dropit-open', function(){
-                            settings.beforeHide.call(this);
-                            $(this).removeClass('dropit-open').find(settings.submenuEl).hide();
-                            settings.afterHide.call(this);
-                        });
-                    }
-
-                    settings.afterLoad.call(this);
-                });
-            }
-
-        };
-
-        if (methods[method]) {
-            return methods[method].apply(this, Array.prototype.slice.call(arguments, 1));
-        } else if (typeof method === 'object' || !method) {
-            return methods.init.apply(this, arguments);
-        } else {
-            $.error( 'Method "' +  method + '" does not exist in dropit plugin!');
-        }
-
-    };
-
-    $.fn.dropit.defaults = {
-        action: 'mouseenter', // The open action for the trigger
-        submenuEl: 'ul', // The submenu element
-        triggerEl: 'a', // The trigger element
-        triggerParentEl: 'li', // The trigger parent element
-        afterLoad: function(){}, // Triggers when plugin has loaded
-        beforeShow: function(){}, // Triggers before submenu is shown
-        afterShow: function(){}, // Triggers after submenu is shown
-        beforeHide: function(){}, // Triggers before submenu is hidden
-        afterHide: function(){} // Triggers before submenu is hidden
-    };
-
-    $.fn.dropit.settings = {};
-
-})(jQuery);
diff --git a/solr/example/files/conf/velocity/js/jquery.autocomplete.js b/solr/example/files/conf/velocity/js/jquery.autocomplete.js
deleted file mode 100644
index 442f5a0..0000000
--- a/solr/example/files/conf/velocity/js/jquery.autocomplete.js
+++ /dev/null
@@ -1,763 +0,0 @@
-/*
- * Autocomplete - jQuery plugin 1.1pre
- *
- * Copyright (c) 2007 Dylan Verheul, Dan G. Switzer, Anjesh Tuladhar, Jörn Zaefferer
- *
- * Dual licensed under the MIT and GPL licenses:
- *   http://www.opensource.org/licenses/mit-license.php
- *   http://www.gnu.org/licenses/gpl.html
- *
- * Revision: Id: jquery.autocomplete.js 5785 2008-07-12 10:37:33Z joern.zaefferer $
- *
- */
-
-;(function($) {
-  
-$.fn.extend({
-  autocomplete: function(urlOrData, options) {
-    var isUrl = typeof urlOrData == "string";
-    options = $.extend({}, $.Autocompleter.defaults, {
-      url: isUrl ? urlOrData : null,
-      data: isUrl ? null : urlOrData,
-      delay: isUrl ? $.Autocompleter.defaults.delay : 10,
-      max: options && !options.scroll ? 10 : 150
-    }, options);
-    
-    // if highlight is set to false, replace it with a do-nothing function
-    options.highlight = options.highlight || function(value) { return value; };
-    
-    // if the formatMatch option is not specified, then use formatItem for backwards compatibility
-    options.formatMatch = options.formatMatch || options.formatItem;
-    
-    return this.each(function() {
-      new $.Autocompleter(this, options);
-    });
-  },
-  result: function(handler) {
-    return this.bind("result", handler);
-  },
-  search: function(handler) {
-    return this.trigger("search", [handler]);
-  },
-  flushCache: function() {
-    return this.trigger("flushCache");
-  },
-  setOptions: function(options){
-    return this.trigger("setOptions", [options]);
-  },
-  unautocomplete: function() {
-    return this.trigger("unautocomplete");
-  }
-});
-
-$.Autocompleter = function(input, options) {
-
-  var KEY = {
-    UP: 38,
-    DOWN: 40,
-    DEL: 46,
-    TAB: 9,
-    RETURN: 13,
-    ESC: 27,
-    COMMA: 188,
-    PAGEUP: 33,
-    PAGEDOWN: 34,
-    BACKSPACE: 8
-  };
-
-  // Create $ object for input element
-  var $input = $(input).attr("autocomplete", "off").addClass(options.inputClass);
-
-  var timeout;
-  var previousValue = "";
-  var cache = $.Autocompleter.Cache(options);
-  var hasFocus = 0;
-  var lastKeyPressCode;
-  var config = {
-    mouseDownOnSelect: false
-  };
-  var select = $.Autocompleter.Select(options, input, selectCurrent, config);
-  
-  var blockSubmit;
-  
-  // prevent form submit in opera when selecting with return key
-  $.browser.opera && $(input.form).bind("submit.autocomplete", function() {
-    if (blockSubmit) {
-      blockSubmit = false;
-      return false;
-    }
-  });
-  
-  // only opera doesn't trigger keydown multiple times while pressed, others don't work with keypress at all
-  $input.bind(($.browser.opera ? "keypress" : "keydown") + ".autocomplete", function(event) {
-    // track last key pressed
-    lastKeyPressCode = event.keyCode;
-    switch(event.keyCode) {
-    
-      case KEY.UP:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.prev();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.DOWN:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.next();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.PAGEUP:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.pageUp();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.PAGEDOWN:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.pageDown();
-        } else {
-          onChange(0, true);
-        }
-        break;
-      
-      // matches also semicolon
-      case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA:
-      case KEY.TAB:
-      case KEY.RETURN:
-        if( selectCurrent() ) {
-          // stop default to prevent a form submit, Opera needs special handling
-          event.preventDefault();
-          blockSubmit = true;
-          return false;
-        }
-        break;
-        
-      case KEY.ESC:
-        select.hide();
-        break;
-        
-      default:
-        clearTimeout(timeout);
-        timeout = setTimeout(onChange, options.delay);
-        break;
-    }
-  }).focus(function(){
-    // track whether the field has focus, we shouldn't process any
-    // results if the field no longer has focus
-    hasFocus++;
-  }).blur(function() {
-    hasFocus = 0;
-    if (!config.mouseDownOnSelect) {
-      hideResults();
-    }
-  }).click(function() {
-    // show select when clicking in a focused field
-    if ( hasFocus++ > 1 && !select.visible() ) {
-      onChange(0, true);
-    }
-  }).bind("search", function() {
-    // TODO why not just specifying both arguments?
-    var fn = (arguments.length > 1) ? arguments[1] : null;
-    function findValueCallback(q, data) {
-      var result;
-      if( data && data.length ) {
-        for (var i=0; i < data.length; i++) {
-          if( data[i].result.toLowerCase() == q.toLowerCase() ) {
-            result = data[i];
-            break;
-          }
-        }
-      }
-      if( typeof fn == "function" ) fn(result);
-      else $input.trigger("result", result && [result.data, result.value]);
-    }
-    $.each(trimWords($input.val()), function(i, value) {
-      request(value, findValueCallback, findValueCallback);
-    });
-  }).bind("flushCache", function() {
-    cache.flush();
-  }).bind("setOptions", function() {
-    $.extend(options, arguments[1]);
-    // if we've updated the data, repopulate
-    if ( "data" in arguments[1] )
-      cache.populate();
-  }).bind("unautocomplete", function() {
-    select.unbind();
-    $input.unbind();
-    $(input.form).unbind(".autocomplete");
-  });
-  
-  
-  function selectCurrent() {
-    var selected = select.selected();
-    if( !selected )
-      return false;
-    
-    var v = selected.result;
-    previousValue = v;
-    
-    if ( options.multiple ) {
-      var words = trimWords($input.val());
-      if ( words.length > 1 ) {
-        v = words.slice(0, words.length - 1).join( options.multipleSeparator ) + options.multipleSeparator + v;
-      }
-      v += options.multipleSeparator;
-    }
-    
-    $input.val(v);
-    hideResultsNow();
-    $input.trigger("result", [selected.data, selected.value]);
-    return true;
-  }
-  
-  function onChange(crap, skipPrevCheck) {
-    if( lastKeyPressCode == KEY.DEL ) {
-      select.hide();
-      return;
-    }
-    
-    var currentValue = $input.val();
-    
-    if ( !skipPrevCheck && currentValue == previousValue )
-      return;
-    
-    previousValue = currentValue;
-    
-    currentValue = lastWord(currentValue);
-    if ( currentValue.length >= options.minChars) {
-      $input.addClass(options.loadingClass);
-      if (!options.matchCase)
-        currentValue = currentValue.toLowerCase();
-      request(currentValue, receiveData, hideResultsNow);
-    } else {
-      stopLoading();
-      select.hide();
-    }
-  };
-  
-  function trimWords(value) {
-    if ( !value ) {
-      return [""];
-    }
-    var words = value.split( options.multipleSeparator );
-    var result = [];
-    $.each(words, function(i, value) {
-      if ( $.trim(value) )
-        result[i] = $.trim(value);
-    });
-    return result;
-  }
-  
-  function lastWord(value) {
-    if ( !options.multiple )
-      return value;
-    var words = trimWords(value);
-    return words[words.length - 1];
-  }
-  
-  // fills in the input box w/the first match (assumed to be the best match)
-  // q: the term entered
-  // sValue: the first matching result
-  function autoFill(q, sValue){
-    // autofill in the complete box w/the first match as long as the user hasn't entered in more data
-    // if the last user key pressed was backspace, don't autofill
-    if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
-      // fill in the value (keep the case the user has typed)
-      $input.val($input.val() + sValue.substring(lastWord(previousValue).length));
-      // select the portion of the value not typed by the user (so the next character will erase)
-      $.Autocompleter.Selection(input, previousValue.length, previousValue.length + sValue.length);
-    }
-  };
-
-  function hideResults() {
-    clearTimeout(timeout);
-    timeout = setTimeout(hideResultsNow, 200);
-  };
-
-  function hideResultsNow() {
-    var wasVisible = select.visible();
-    select.hide();
-    clearTimeout(timeout);
-    stopLoading();
-    if (options.mustMatch) {
-      // call search and run callback
-      $input.search(
-        function (result){
-          // if no value found, clear the input box
-          if( !result ) {
-            if (options.multiple) {
-              var words = trimWords($input.val()).slice(0, -1);
-              $input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
-            }
-            else
-              $input.val( "" );
-          }
-        }
-      );
-    }
-    if (wasVisible)
-      // position cursor at end of input field
-      $.Autocompleter.Selection(input, input.value.length, input.value.length);
-  };
-
-  function receiveData(q, data) {
-    if ( data && data.length && hasFocus ) {
-      stopLoading();
-      select.display(data, q);
-      autoFill(q, data[0].value);
-      select.show();
-    } else {
-      hideResultsNow();
-    }
-  };
-
-  function request(term, success, failure) {
-    if (!options.matchCase)
-      term = term.toLowerCase();
-    var data = cache.load(term);
-    data = null; // Avoid buggy cache and go to Solr every time 
-    // recieve the cached data
-    if (data && data.length) {
-      success(term, data);
-    // if an AJAX url has been supplied, try loading the data now
-    } else if( (typeof options.url == "string") && (options.url.length > 0) ){
-      
-      var extraParams = {
-        timestamp: +new Date()
-      };
-      $.each(options.extraParams, function(key, param) {
-        extraParams[key] = typeof param == "function" ? param() : param;
-      });
-      
-      $.ajax({
-        // try to leverage ajaxQueue plugin to abort previous requests
-        mode: "abort",
-        // limit abortion to this input
-        port: "autocomplete" + input.name,
-        dataType: options.dataType,
-        url: options.url,
-        data: $.extend({
-          q: lastWord(term),
-          limit: options.max
-        }, extraParams),
-        success: function(data) {
-          var parsed = options.parse && options.parse(data) || parse(data);
-          cache.add(term, parsed);
-          success(term, parsed);
-        }
-      });
-    } else {
-      // if we have a failure, we need to empty the list -- this prevents the the [TAB] key from selecting the last successful match
-      select.emptyList();
-      failure(term);
-    }
-  };
-  
-  function parse(data) {
-    var parsed = [];
-    var rows = data.split("\n");
-    for (var i=0; i < rows.length; i++) {
-      var row = $.trim(rows[i]);
-      if (row) {
-        row = row.split("|");
-        parsed[parsed.length] = {
-          data: row,
-          value: row[0],
-          result: options.formatResult && options.formatResult(row, row[0]) || row[0]
-        };
-      }
-    }
-    return parsed;
-  };
-
-  function stopLoading() {
-    $input.removeClass(options.loadingClass);
-  };
-
-};
-
-$.Autocompleter.defaults = {
-  inputClass: "ac_input",
-  resultsClass: "ac_results",
-  loadingClass: "ac_loading",
-  minChars: 1,
-  delay: 400,
-  matchCase: false,
-  matchSubset: true,
-  matchContains: false,
-  cacheLength: 10,
-  max: 100,
-  mustMatch: false,
-  extraParams: {},
-  selectFirst: false,
-  formatItem: function(row) { return row[0]; },
-  formatMatch: null,
-  autoFill: false,
-  width: 0,
-  multiple: false,
-  multipleSeparator: ", ",
-  highlight: function(value, term) {
-    return value.replace(new RegExp("(?![^&;]+;)(?!<[^<>]*)(" + term.replace(/([\^\$\(\)\[\]\{\}\*\.\+\?\|\\])/gi, "\\$1") + ")(?![^<>]*>)(?![^&;]+;)", "gi"), "<strong>$1</strong>");
-  },
-    scroll: true,
-    scrollHeight: 180
-};
-
-$.Autocompleter.Cache = function(options) {
-
-  var data = {};
-  var length = 0;
-  
-  function matchSubset(s, sub) {
-    if (!options.matchCase) 
-      s = s.toLowerCase();
-    var i = s.indexOf(sub);
-    if (options.matchContains == "word"){
-      i = s.toLowerCase().search("\\b" + sub.toLowerCase());
-    }
-    if (i == -1) return false;
-    return i == 0 || options.matchContains;
-  };
-  
-  function add(q, value) {
-    if (length > options.cacheLength){
-      flush();
-    }
-    if (!data[q]){ 
-      length++;
-    }
-    data[q] = value;
-  }
-  
-  function populate(){
-    if( !options.data ) return false;
-    // track the matches
-    var stMatchSets = {},
-      nullData = 0;
-
-    // no url was specified, we need to adjust the cache length to make sure it fits the local data store
-    if( !options.url ) options.cacheLength = 1;
-    
-    // track all options for minChars = 0
-    stMatchSets[""] = [];
-    
-    // loop through the array and create a lookup structure
-    for ( var i = 0, ol = options.data.length; i < ol; i++ ) {
-      var rawValue = options.data[i];
-      // if rawValue is a string, make an array otherwise just reference the array
-      rawValue = (typeof rawValue == "string") ? [rawValue] : rawValue;
-      
-      var value = options.formatMatch(rawValue, i+1, options.data.length);
-      if ( value === false )
-        continue;
-        
-      var firstChar = value.charAt(0).toLowerCase();
-      // if no lookup array for this character exists, look it up now
-      if( !stMatchSets[firstChar] ) 
-        stMatchSets[firstChar] = [];
-
-      // if the match is a string
-      var row = {
-        value: value,
-        data: rawValue,
-        result: options.formatResult && options.formatResult(rawValue) || value
-      };
-      
-      // push the current match into the set list
-      stMatchSets[firstChar].push(row);
-
-      // keep track of minChars zero items
-      if ( nullData++ < options.max ) {
-        stMatchSets[""].push(row);
-      }
-    };
-
-    // add the data items to the cache
-    $.each(stMatchSets, function(i, value) {
-      // increase the cache size
-      options.cacheLength++;
-      // add to the cache
-      add(i, value);
-    });
-  }
-  
-  // populate any existing data
-  setTimeout(populate, 25);
-  
-  function flush(){
-    data = {};
-    length = 0;
-  }
-  
-  return {
-    flush: flush,
-    add: add,
-    populate: populate,
-    load: function(q) {
-      if (!options.cacheLength || !length)
-        return null;
-      /* 
-       * if dealing w/local data and matchContains than we must make sure
-       * to loop through all the data collections looking for matches
-       */
-      if( !options.url && options.matchContains ){
-        // track all matches
-        var csub = [];
-        // loop through all the data grids for matches
-        for( var k in data ){
-          // don't search through the stMatchSets[""] (minChars: 0) cache
-          // this prevents duplicates
-          if( k.length > 0 ){
-            var c = data[k];
-            $.each(c, function(i, x) {
-              // if we've got a match, add it to the array
-              if (matchSubset(x.value, q)) {
-                csub.push(x);
-              }
-            });
-          }
-        }        
-        return csub;
-      } else 
-      // if the exact item exists, use it
-      if (data[q]){
-        return data[q];
-      } else
-      if (options.matchSubset) {
-        for (var i = q.length - 1; i >= options.minChars; i--) {
-          var c = data[q.substr(0, i)];
-          if (c) {
-            var csub = [];
-            $.each(c, function(i, x) {
-              if (matchSubset(x.value, q)) {
-                csub[csub.length] = x;
-              }
-            });
-            return csub;
-          }
-        }
-      }
-      return null;
-    }
-  };
-};
-
-$.Autocompleter.Select = function (options, input, select, config) {
-  var CLASSES = {
-    ACTIVE: "ac_over"
-  };
-  
-  var listItems,
-    active = -1,
-    data,
-    term = "",
-    needsInit = true,
-    element,
-    list;
-  
-  // Create results
-  function init() {
-    if (!needsInit)
-      return;
-    element = $("<div/>")
-    .hide()
-    .addClass(options.resultsClass)
-    .css("position", "absolute")
-    .appendTo(document.body);
-  
-    list = $("<ul/>").appendTo(element).mouseover( function(event) {
-      if(target(event).nodeName && target(event).nodeName.toUpperCase() == 'LI') {
-              active = $("li", list).removeClass(CLASSES.ACTIVE).index(target(event));
-          $(target(event)).addClass(CLASSES.ACTIVE);            
-          }
-    }).click(function(event) {
-      $(target(event)).addClass(CLASSES.ACTIVE);
-      select();
-      // TODO provide option to avoid setting focus again after selection? useful for cleanup-on-focus
-      input.focus();
-      return false;
-    }).mousedown(function() {
-      config.mouseDownOnSelect = true;
-    }).mouseup(function() {
-      config.mouseDownOnSelect = false;
-    });
-    
-    if( options.width > 0 )
-      element.css("width", options.width);
-      
-    needsInit = false;
-  } 
-  
-  function target(event) {
-    var element = event.target;
-    while(element && element.tagName != "LI")
-      element = element.parentNode;
-    // more fun with IE, sometimes event.target is empty, just ignore it then
-    if(!element)
-      return [];
-    return element;
-  }
-
-  function moveSelect(step) {
-    listItems.slice(active, active + 1).removeClass(CLASSES.ACTIVE);
-    movePosition(step);
-        var activeItem = listItems.slice(active, active + 1).addClass(CLASSES.ACTIVE);
-        if(options.scroll) {
-            var offset = 0;
-            listItems.slice(0, active).each(function() {
-        offset += this.offsetHeight;
-      });
-            if((offset + activeItem[0].offsetHeight - list.scrollTop()) > list[0].clientHeight) {
-                list.scrollTop(offset + activeItem[0].offsetHeight - list.innerHeight());
-            } else if(offset < list.scrollTop()) {
-                list.scrollTop(offset);
-            }
-        }
-  };
-  
-  function movePosition(step) {
-    active += step;
-    if (active < 0) {
-      active = listItems.size() - 1;
-    } else if (active >= listItems.size()) {
-      active = 0;
-    }
-  }
-  
-  function limitNumberOfItems(available) {
-    return options.max && options.max < available
-      ? options.max
-      : available;
-  }
-  
-  function fillList() {
-    list.empty();
-    var max = limitNumberOfItems(data.length);
-    for (var i=0; i < max; i++) {
-      if (!data[i])
-        continue;
-      var formatted = options.formatItem(data[i].data, i+1, max, data[i].value, term);
-      if ( formatted === false )
-        continue;
-      var li = $("<li/>").html( options.highlight(formatted, term) ).addClass(i%2 == 0 ? "ac_even" : "ac_odd").appendTo(list)[0];
-      $.data(li, "ac_data", data[i]);
-    }
-    listItems = list.find("li");
-    if ( options.selectFirst ) {
-      listItems.slice(0, 1).addClass(CLASSES.ACTIVE);
-      active = 0;
-    }
-    // apply bgiframe if available
-    if ( $.fn.bgiframe )
-      list.bgiframe();
-  }
-  
-  return {
-    display: function(d, q) {
-      init();
-      data = d;
-      term = q;
-      fillList();
-    },
-    next: function() {
-      moveSelect(1);
-    },
-    prev: function() {
-      moveSelect(-1);
-    },
-    pageUp: function() {
-      if (active != 0 && active - 8 < 0) {
-        moveSelect( -active );
-      } else {
-        moveSelect(-8);
-      }
-    },
-    pageDown: function() {
-      if (active != listItems.size() - 1 && active + 8 > listItems.size()) {
-        moveSelect( listItems.size() - 1 - active );
-      } else {
-        moveSelect(8);
-      }
-    },
-    hide: function() {
-      element && element.hide();
-      listItems && listItems.removeClass(CLASSES.ACTIVE);
-      active = -1;
-    },
-    visible : function() {
-      return element && element.is(":visible");
-    },
-    current: function() {
-      return this.visible() && (listItems.filter("." + CLASSES.ACTIVE)[0] || options.selectFirst && listItems[0]);
-    },
-    show: function() {
-      var offset = $(input).offset();
-      element.css({
-        width: typeof options.width == "string" || options.width > 0 ? options.width : $(input).width(),
-        top: offset.top + input.offsetHeight,
-        left: offset.left
-      }).show();
-            if(options.scroll) {
-                list.scrollTop(0);
-                list.css({
-          maxHeight: options.scrollHeight,
-          overflow: 'auto'
-        });
-        
-                if($.browser.msie && typeof document.body.style.maxHeight === "undefined") {
-          var listHeight = 0;
-          listItems.each(function() {
-            listHeight += this.offsetHeight;
-          });
-          var scrollbarsVisible = listHeight > options.scrollHeight;
-                    list.css('height', scrollbarsVisible ? options.scrollHeight : listHeight );
-          if (!scrollbarsVisible) {
-            // IE doesn't recalculate width when scrollbar disappears
-            listItems.width( list.width() - parseInt(listItems.css("padding-left")) - parseInt(listItems.css("padding-right")) );
-          }
-                }
-                
-            }
-    },
-    selected: function() {
-      var selected = listItems && listItems.filter("." + CLASSES.ACTIVE).removeClass(CLASSES.ACTIVE);
-      return selected && selected.length && $.data(selected[0], "ac_data");
-    },
-    emptyList: function (){
-      list && list.empty();
-    },
-    unbind: function() {
-      element && element.remove();
-    }
-  };
-};
-
-$.Autocompleter.Selection = function(field, start, end) {
-  if( field.createTextRange ){
-    var selRange = field.createTextRange();
-    selRange.collapse(true);
-    selRange.moveStart("character", start);
-    selRange.moveEnd("character", end);
-    selRange.select();
-  } else if( field.setSelectionRange ){
-    field.setSelectionRange(start, end);
-  } else {
-    if( field.selectionStart ){
-      field.selectionStart = start;
-      field.selectionEnd = end;
-    }
-  }
-  field.focus();
-};
-
-})(jQuery);
\ No newline at end of file
diff --git a/solr/example/files/conf/velocity/js/jquery.tx3-tag-cloud.js b/solr/example/files/conf/velocity/js/jquery.tx3-tag-cloud.js
deleted file mode 100644
index 3597b4a..0000000
--- a/solr/example/files/conf/velocity/js/jquery.tx3-tag-cloud.js
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * ----------------------------------------------------------------------------
- * "THE BEER-WARE LICENSE" (Revision 42):
- * Tuxes3 wrote this file. As long as you retain this notice you
- * can do whatever you want with this stuff. If we meet some day, and you think
- * this stuff is worth it, you can buy me a beer in return Tuxes3
- * ----------------------------------------------------------------------------
- */
-(function($)
-{
-  var settings;
-    $.fn.tx3TagCloud = function(options)
-    {
-
-      //
-      // DEFAULT SETTINGS
-      //
-      settings = $.extend({
-        multiplier    : 1
-      }, options);
-      main(this);
-
-    }
-
-    function main(element)
-    {
-      // adding style attr
-      element.addClass("tx3-tag-cloud");
-      addListElementFontSize(element);
-    }
-
-    /**
-     * calculates the font size on each li element 
-     * according to their data-weight attribut
-     */
-    function addListElementFontSize(element)
-    {
-      var hDataWeight = -9007199254740992;
-      var lDataWeight = 9007199254740992;
-      $.each(element.find("li"), function(){
-        cDataWeight = getDataWeight(this);
-        if (cDataWeight == undefined)
-        {
-          logWarning("No \"data-weight\" attribut defined on <li> element");
-        }
-        else
-        {
-          hDataWeight = cDataWeight > hDataWeight ? cDataWeight : hDataWeight;
-          lDataWeight = cDataWeight < lDataWeight ? cDataWeight : lDataWeight;
-        }
-      });
-      $.each(element.find("li"), function(){
-        var dataWeight = getDataWeight(this);
-        var percent = Math.abs((dataWeight - lDataWeight)/(lDataWeight - hDataWeight));
-        $(this).css('font-size', (1 + (percent * settings['multiplier'])) + "em");
-      });
-
-    }
-
-    function getDataWeight(element)
-    {
-      return parseInt($(element).attr("data-weight"));
-    }
-
-    function logWarning(message)
-    {
-      console.log("[WARNING] " + Date.now() + " : " + message);
-    }
-
-}(jQuery));
\ No newline at end of file
diff --git a/solr/example/files/conf/velocity/layout.vm b/solr/example/files/conf/velocity/layout.vm
deleted file mode 100644
index ef6caf7..0000000
--- a/solr/example/files/conf/velocity/layout.vm
+++ /dev/null
@@ -1,42 +0,0 @@
-<html>
-<head>
-  #parse("head.vm")
-</head>
-  <body>
-    <div id="header">
-      <a href="#url_for_home"><img src="#{url_root}/img/solr.svg" id="logo" title="Solr"/></a> $resource.powered_file_search
-    </div>
-
-    <div id="header2" onclick="javascript:locale_select()">
-      <ul class="menu">
-
-        <li>
-          <a href="#"><img src="#{url_for_solr}/admin/file?file=/velocity/img/globe_256.png&contentType=image/png" id="locale_pic" title="locale_select" width="30px" height="27px"/></a>
-          <ul>
-            <li><a href="#url_for_locale('fr_FR')" #if("#current_locale"=="fr_FR")class="hidden"#end>
-              <img src="#{url_for_solr}/admin/file?file=/velocity/img/france_640.png&contentType=image/png" id="french_flag"  width="40px" height="40px"/>Fran&ccedil;ais</a></li>
-            <li><a href="#url_for_locale('de_DE')" #if("#current_locale"=="de_DE")class="hidden"#end>
-              <img src="#{url_for_solr}/admin/file?file=/velocity/img/germany_640.png&contentType=image/png" id="german_flag"  width="40px" height="40px"/>Deutsch</a></li>
-            <li><a href="#url_for_locale('')" #if("#current_locale"=="")class="hidden"#end>
-              <img src="#{url_for_solr}/admin/file?file=/velocity/img/english_640.png&contentType=image/png" id="english_flag"  width="40px" height="40px"/>English</a></li>
-          </ul>
-        </li>
-      </ul>
-    </div>
-
-    #if($response.response.error.code)
-      <div class="error">
-        <h1>ERROR $response.response.error.code</h1>
-        $response.response.error.msg
-      </div>
-    #else
-      <div id="content">
-        $content
-      </div>
-    #end
-
-    <div id="footer">
-      #parse("footer.vm")
-    </div>
-  </body>
-</html>
diff --git a/solr/example/files/conf/velocity/macros.vm b/solr/example/files/conf/velocity/macros.vm
deleted file mode 100644
index 8bebb7f..0000000
--- a/solr/example/files/conf/velocity/macros.vm
+++ /dev/null
@@ -1,16 +0,0 @@
-#macro(lensFilterSortOnly)?#if($response.responseHeader.params.getAll("fq").size() > 0)&#fqs($response.responseHeader.params.getAll("fq"))#end#sort($request.params.getParams('sort'))#end
-#macro(lensNoQ)#lensFilterSortOnly&type=#current_type#if("#current_locale"!="")&locale=#current_locale#end#end
-#macro(lensNoType)#lensFilterSortOnly#q#if("#current_locale"!="")&locale=#current_locale#end#end
-#macro(lensNoLocale)#lensFilterSortOnly#q&type=#current_type#end
-
-## lens modified for example/files - to use fq from responseHeader rather than request, and #debug removed too as it is built into browse params now, also added type to lens
-#macro(lens)#lensNoQ#q#end
-
-## Macros defined custom for the "files" example
-#macro(url_for_type $type)#url_for_home#lensNoType&type=$type#end
-#macro(current_type)#if($response.responseHeader.params.type)${response.responseHeader.params.type}#{else}all#end#end
-#macro(url_for_locale $locale)#url_for_home#lensNoLocale#if($locale!="")&locale=$locale#end&start=$page.start#end
-#macro(current_locale)$!{response.responseHeader.params.locale}#end
-
-## Usage: #label(resource_key[, default_value]) - resource_key is used as label if no default value specified and no resource exists
-#macro(label $key $default)#if($resource.get($key).exists)${resource.get($key)}#else#if($default)$default#else${key}#end#end#end
diff --git a/solr/example/files/conf/velocity/mime_type_lists.vm b/solr/example/files/conf/velocity/mime_type_lists.vm
deleted file mode 100644
index 1468bbd..0000000
--- a/solr/example/files/conf/velocity/mime_type_lists.vm
+++ /dev/null
@@ -1,68 +0,0 @@
-#**
- *  Define some Mime-Types, short and long form
- *#
-
-## MimeType to extension map for detecting file type
-## and showing proper icon
-## List of types match the icons in /solr/img/filetypes
-
-## Short MimeType Names
-## Was called $supportedtypes
-#set($supportedMimeTypes = "7z;ai;aiff;asc;audio;bin;bz2;c;cfc;cfm;chm;class;conf;cpp;cs;css;csv;deb;divx;doc;dot;eml;enc;file;gif;gz;hlp;htm;html;image;iso;jar;java;jpeg;jpg;js;lua;m;mm;mov;mp3;mpg;odc;odf;odg;odi;odp;ods;odt;ogg;pdf;pgp;php;pl;png;ppt;ps;py;ram;rar;rb;rm;rpm;rtf;sig;sql;swf;sxc;sxd;sxi;sxw;tar;tex;tgz;txt;vcf;video;vsd;wav;wma;wmv;xls;xml;xpi;xvid;zip")
-
-## Long Form: map MimeType headers to our Short names
-## Was called $extMap
-#set( $mimeExtensionsMap = {
-   "application/x-7z-compressed": "7z",
-   "application/postscript": "ai",
-   "application/pgp-signature": "asc",
-   "application/octet-stream": "bin",
-   "application/x-bzip2": "bz2",
-   "text/x-c": "c",
-   "application/vnd.ms-htmlhelp": "chm",
-   "application/java-vm": "class",
-   "text/css": "css",
-   "text/csv": "csv",
-   "application/x-debian-package": "deb",
-   "application/msword": "doc",
-   "message/rfc822": "eml",
-   "image/gif": "gif",
-   "application/winhlp": "hlp",
-   "text/html": "html",
-   "application/java-archive": "jar",
-   "text/x-java-source": "java",
-   "image/jpeg": "jpeg",
-   "application/javascript": "js",
-   "application/vnd.oasis.opendocument.chart": "odc",
-   "application/vnd.oasis.opendocument.formula": "odf",
-   "application/vnd.oasis.opendocument.graphics": "odg",
-   "application/vnd.oasis.opendocument.image": "odi",
-   "application/vnd.oasis.opendocument.presentation": "odp",
-   "application/vnd.oasis.opendocument.spreadsheet": "ods",
-   "application/vnd.oasis.opendocument.text": "odt",
-   "application/pdf": "pdf",
-   "application/pgp-encrypted": "pgp",
-   "image/png": "png",
-   "application/vnd.ms-powerpoint": "ppt",
-   "audio/x-pn-realaudio": "ram",
-   "application/x-rar-compressed": "rar",
-   "application/vnd.rn-realmedia": "rm",
-   "application/rtf": "rtf",
-   "application/x-shockwave-flash": "swf",
-   "application/vnd.sun.xml.calc": "sxc",
-   "application/vnd.sun.xml.draw": "sxd",
-   "application/vnd.sun.xml.impress": "sxi",
-   "application/vnd.sun.xml.writer": "sxw",
-   "application/x-tar": "tar",
-   "application/x-tex": "tex",
-   "text/plain": "txt",
-   "text/x-vcard": "vcf",
-   "application/vnd.visio": "vsd",
-   "audio/x-wav": "wav",
-   "audio/x-ms-wma": "wma",
-   "video/x-ms-wmv": "wmv",
-   "application/vnd.ms-excel": "xls",
-   "application/xml": "xml",
-   "application/x-xpinstall": "xpi",
-   "application/zip": "zip"
-})
diff --git a/solr/example/files/conf/velocity/results.vm b/solr/example/files/conf/velocity/results.vm
deleted file mode 100644
index b8a17a9..0000000
--- a/solr/example/files/conf/velocity/results.vm
+++ /dev/null
@@ -1,20 +0,0 @@
-<div id="facets">
-  #parse("facets.vm")
-</div>
-
-
-<div id="results_list">
-  <div class="pagination">
-    <span class="results-found">$page.results_found</span> $resource.results_found_in.insert(${response.responseHeader.QTime})
-    $resource.page_of.insert($page.current_page_number,$page.page_count)
-  </div>
-
-  #parse("results_list.vm")
-
-  <div class="pagination">
-    #link_to_previous_page
-    <span class="results-found">$page.results_found</span> $resource.results_found.
-    $resource.page_of.insert($page.current_page_number,$page.page_count)
-    #link_to_next_page
-  </div>
-</div>
diff --git a/solr/example/files/conf/velocity/results_list.vm b/solr/example/files/conf/velocity/results_list.vm
deleted file mode 100644
index 908e45b..0000000
--- a/solr/example/files/conf/velocity/results_list.vm
+++ /dev/null
@@ -1,21 +0,0 @@
-<ul id="tabs">
-  <li><a href="#url_for_type('all')" #if("#current_type"=="all")class="selected"#end>$resource.type.all ($response.response.facet_counts.facet_queries.all_types)</a></li>
-  #foreach($type in $response.response.facet_counts.facet_fields.doc_type)
-    #if($type.key)
-      <li><a href="#url_for_type($type.key)" #if($type.value=="0")class="no_results"#end #if("#current_type"==$type.key)class="selected"#end> #label("type.${type.key}.label", $type.key) ($type.value)</a></li>
-    #else
-      #if($type.value > 0)
-        <li><a href="#url_for_type('unknown')" #if("#current_type"=="unknown")class="selected"#end>$resource.type.unknown ($type.value)</a></li>
-      #end
-    #end
-  #end
-</ul>
-
-
-<div id="results">
-  #foreach($doc in $response.results)
-    #parse("hit.vm")
-  #end
-</div>
-
-
diff --git a/solr/licenses/velocity-engine-core-2.0.jar.sha1 b/solr/licenses/velocity-engine-core-2.0.jar.sha1
deleted file mode 100644
index 9cbf13d..0000000
--- a/solr/licenses/velocity-engine-core-2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6e5f29e1237b1764a4ce769feeffb85b0b19cfa7
diff --git a/solr/licenses/velocity-engine-core-LICENSE-ASL.txt b/solr/licenses/velocity-engine-core-LICENSE-ASL.txt
deleted file mode 100644
index d645695..0000000
--- a/solr/licenses/velocity-engine-core-LICENSE-ASL.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/velocity-engine-core-NOTICE.txt b/solr/licenses/velocity-engine-core-NOTICE.txt
deleted file mode 100644
index c016d50..0000000
--- a/solr/licenses/velocity-engine-core-NOTICE.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Apache Velocity
-
-Copyright (C) 2000-2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
diff --git a/solr/licenses/velocity-tools-generic-3.0.jar.sha1 b/solr/licenses/velocity-tools-generic-3.0.jar.sha1
deleted file mode 100644
index 018c1b3..0000000
--- a/solr/licenses/velocity-tools-generic-3.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e789f6ec06f9a69ccb8956f407fb685b2938e74b
diff --git a/solr/licenses/velocity-tools-generic-LICENSE-ASL.txt b/solr/licenses/velocity-tools-generic-LICENSE-ASL.txt
deleted file mode 100644
index 261eeb9..0000000
--- a/solr/licenses/velocity-tools-generic-LICENSE-ASL.txt
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/velocity-tools-generic-NOTICE.txt b/solr/licenses/velocity-tools-generic-NOTICE.txt
deleted file mode 100644
index 7d6375e..0000000
--- a/solr/licenses/velocity-tools-generic-NOTICE.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Apache Velocity Tools
-
-Copyright (C) 2000-2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Support for using SSL with Struts is provided using
-the sslext library package, which is open source software
-under the Apache Software License 1.1 with copyright attributed
-to The Apache Software Foundation.
-This software is available from http://sslext.sourceforge.net/
diff --git a/solr/licenses/velocity-tools-view-3.0.jar.sha1 b/solr/licenses/velocity-tools-view-3.0.jar.sha1
deleted file mode 100644
index 67cf265..0000000
--- a/solr/licenses/velocity-tools-view-3.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2f72ca8eb2bcb8af2c5fab826d64add20ab70a2e
diff --git a/solr/licenses/velocity-tools-view-LICENSE-ASL.txt b/solr/licenses/velocity-tools-view-LICENSE-ASL.txt
deleted file mode 100644
index 261eeb9..0000000
--- a/solr/licenses/velocity-tools-view-LICENSE-ASL.txt
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/velocity-tools-view-NOTICE.txt b/solr/licenses/velocity-tools-view-NOTICE.txt
deleted file mode 100644
index 7d6375e..0000000
--- a/solr/licenses/velocity-tools-view-NOTICE.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Apache Velocity Tools
-
-Copyright (C) 2000-2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Support for using SSL with Struts is provided using
-the sslext library package, which is open source software
-under the Apache Software License 1.1 with copyright attributed
-to The Apache Software Foundation.
-This software is available from http://sslext.sourceforge.net/
diff --git a/solr/licenses/velocity-tools-view-jsp-3.0.jar.sha1 b/solr/licenses/velocity-tools-view-jsp-3.0.jar.sha1
deleted file mode 100644
index 45dd7f8..0000000
--- a/solr/licenses/velocity-tools-view-jsp-3.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-27f6a21c7973ffb75001b3e9ac4731facf5757b4
diff --git a/solr/licenses/velocity-tools-view-jsp-LICENSE-ASL.txt b/solr/licenses/velocity-tools-view-jsp-LICENSE-ASL.txt
deleted file mode 100644
index 261eeb9..0000000
--- a/solr/licenses/velocity-tools-view-jsp-LICENSE-ASL.txt
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/solr/licenses/velocity-tools-view-jsp-NOTICE.txt b/solr/licenses/velocity-tools-view-jsp-NOTICE.txt
deleted file mode 100644
index 7d6375e..0000000
--- a/solr/licenses/velocity-tools-view-jsp-NOTICE.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Apache Velocity Tools
-
-Copyright (C) 2000-2007 The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-Support for using SSL with Struts is provided using
-the sslext library package, which is open source software
-under the Apache Software License 1.1 with copyright attributed
-to The Apache Software Foundation.
-This software is available from http://sslext.sourceforge.net/
diff --git a/solr/packaging/build.gradle b/solr/packaging/build.gradle
index 12072bc..a017a0d 100644
--- a/solr/packaging/build.gradle
+++ b/solr/packaging/build.gradle
@@ -51,7 +51,6 @@
    ":solr:contrib:langid",
    ":solr:contrib:ltr",
    ":solr:contrib:prometheus-exporter",
-   ":solr:contrib:velocity",
   ].each { contribName ->
     distSolr project(contribName)
     contrib  project(path: contribName, configuration: "packaging")
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
index 26141b8..34c7567 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/solrconfig.xml
@@ -83,9 +83,6 @@
 
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
 
-  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
-
   <!-- an exact 'path' can be used instead of a 'dir' to specify a
        specific jar file.  This will cause a serious error to be logged
        if it can't be loaded.
@@ -852,106 +849,7 @@
      </lst>
   </requestHandler>
 
-  <!-- A Robust Example
-
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-
-       <!-- VelocityResponseWriter settings -->
-       <str name="wt">velocity</str>
-       <str name="v.template">browse</str>
-       <str name="v.layout">layout</str>
-       <str name="title">Solritas</str>
-
-       <!-- Query settings -->
-       <str name="defType">edismax</str>
-       <str name="qf">
-          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="mm">100%</str>
-       <str name="q.alt">*:*</str>
-       <str name="rows">10</str>
-       <str name="fl">*,score</str>
-
-       <str name="mlt.qf">
-         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
-         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
-       </str>
-       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
-       <int name="mlt.count">3</int>
-
-       <!-- Faceting defaults -->
-       <str name="facet">on</str>
-       <str name="facet.missing">true</str>
-       <str name="facet.field">cat</str>
-       <str name="facet.field">manu_exact</str>
-       <str name="facet.field">content_type</str>
-       <str name="facet.field">author_s</str>
-       <str name="facet.query">ipod</str>
-       <str name="facet.query">GB</str>
-       <str name="facet.mincount">1</str>
-       <str name="facet.pivot">cat,inStock</str>
-       <str name="facet.range.other">after</str>
-       <str name="facet.range">price</str>
-       <int name="f.price.facet.range.start">0</int>
-       <int name="f.price.facet.range.end">600</int>
-       <int name="f.price.facet.range.gap">50</int>
-       <str name="facet.range">popularity</str>
-       <int name="f.popularity.facet.range.start">0</int>
-       <int name="f.popularity.facet.range.end">10</int>
-       <int name="f.popularity.facet.range.gap">3</int>
-       <str name="facet.range">manufacturedate_dt</str>
-       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
-       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
-       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
-       <str name="f.manufacturedate_dt.facet.range.other">before</str>
-       <str name="f.manufacturedate_dt.facet.range.other">after</str>
-
-       <!-- Highlighting defaults -->
-       <str name="hl">on</str>
-       <str name="hl.fl">content features title name</str>
-       <str name="hl.preserveMulti">true</str>
-       <str name="hl.encoder">html</str>
-       <str name="hl.simple.pre">&lt;b&gt;</str>
-       <str name="hl.simple.post">&lt;/b&gt;</str>
-       <str name="f.title.hl.fragsize">0</str>
-       <str name="f.title.hl.alternateField">title</str>
-       <str name="f.name.hl.fragsize">0</str>
-       <str name="f.name.hl.alternateField">name</str>
-       <str name="f.content.hl.snippets">3</str>
-       <str name="f.content.hl.fragsize">200</str>
-       <str name="f.content.hl.alternateField">content</str>
-       <str name="f.content.hl.maxAlternateFieldLength">750</str>
-
-       <!-- Spell checking defaults -->
-       <str name="spellcheck">on</str>
-       <str name="spellcheck.extendedResults">false</str>
-       <str name="spellcheck.count">5</str>
-       <str name="spellcheck.alternativeTermCount">2</str>
-       <str name="spellcheck.maxResultsForSuggest">5</str>
-       <str name="spellcheck.collate">true</str>
-       <str name="spellcheck.collateExtendedResults">true</str>
-       <str name="spellcheck.maxCollationTries">5</str>
-       <str name="spellcheck.maxCollations">3</str>
-     </lst>
-
-     <!-- append spellchecking to our list of components -->
-     <arr name="last-components">
-       <str>spellcheck</str>
-     </arr>
-  </requestHandler>
-
-
-  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,update">
     <lst name="defaults">
       <str name="df">text</str>
     </lst>
@@ -1561,14 +1459,6 @@
     <str name="content-type">text/plain; charset=UTF-8</str>
   </queryResponseWriter>
 
-  <!--
-     Custom response writers can be declared as needed...
-    -->
-    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
-      <str name="template.base.dir">${velocity.template.base.dir:}</str>
-    </queryResponseWriter>
-
-
   <!-- XSLT response writer transforms the XML output by any xslt file found
        in Solr's conf/xslt directory.  Changes to xslt files are checked for
        every xsltCacheLifetimeSeconds.
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.md b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.md
deleted file mode 100644
index 9f4db64..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-Introduction
-------------
-Solr Search Velocity Templates
-
-A quick demo of using Solr using http://wiki.apache.org/solr/VelocityResponseWriter
-
-You typically access these templates via:
-    http://localhost:8983/solr/collection1/browse
-
-It's called "browse" because you can click around with your mouse
-without needing to type any search terms.  And of course it
-also works as a standard search app as well.
-
-Known Limitations
------------------
-* The /browse and the VelocityResponseWriter component
-  serve content directly from Solr, which usually requires
-  Solr's HTTP API to be exposed.  Advanced users could
-  potentially access other parts of Solr directly.
-* There are some hard coded fields in these templates.
-  Since these templates live under conf, they should be
-  considered part of the overall configuration, and
-  must be coordinated with schema.xml and solrconfig.xml
-
-Velocity Info
--------------
-Java-based template language.
-
-It's nice in this context because change to the templates
-are immediately visible in browser on the next visit.
-
-Links:
-
-    http://velocity.apache.org
-    http://wiki.apache.org/velocity/
-    http://velocity.apache.org/engine/releases/velocity-1.7/user-guide.html
-
-
-File List
----------
-
-System and Misc:
-```
-  VM_global_library.vm    - Macros used other templates,
-                            exact filename is important for Velocity to see it
-  error.vm                - shows errors, if any
-  debug.vm                - includes toggle links for "explain" and "all fields"
-                            activated by debug link in footer.vm
-  README.md              - this file
-```
-
-Overall Page Composition:
-
-```
-  browse.vm               - Main entry point into templates
-  layout.vm               - overall HTML page layout
-  head.vm                 - elements in the <head> section of the HTML document
-  header.vm               - top section of page visible to users
-  footer.vm               - bottom section of page visible to users,
-                            includes debug and help links
-  main.css                - CSS style for overall pages
-                            see also jquery.autocomplete.css
-```
-
-Query Form and Options:
-
-```
-  query_form.vm           - renders query form
-  query_group.vm          - group by fields
-                            e.g.: Manufacturer or Poplularity
-  query_spatial.vm        - select box for location based Geospacial search
-```
-
-Spelling Suggestions:
-
-```
-  did_you_mean.vm         - hyperlinked spelling suggestions in results
-  suggest.vm              - dynamic spelling suggestions
-                            as you type in the search form
-  jquery.autocomplete.js  - supporting files for dynamic suggestions
-  jquery.autocomplete.css - Most CSS is defined in main.css
-```
-
-Search Results, General:
-
-```
-  (see also browse.vm)
-  tabs.vm                 - provides navigation to advanced search options
-  pagination_top.vm       - paging and staticis at top of results
-  pagination_bottom.vm    - paging and staticis at bottom of results
-  results_list.vm
-  hit.vm                  - called for each matching doc,
-                            decides which template to use
-  hit_grouped.vm          - display results grouped by field values
-  product_doc.vm          - display a Product
-  join_doc.vm             - display a joined document
-  richtext_doc.vm         - display a complex/misc. document
-  hit_plain.vm            - basic display of all fields,
-                            edit results_list.vm to enable this
-```
-
-Search Results, Facets & Clusters:
-```
-  facets.vm               - calls the 4 facet and 1 cluster template
-  facet_fields.vm         - display facets based on field values
-                            e.g.: fields specified by &facet.field=
-  facet_queries.vm        - display facets based on specific facet queries
-                            e.g.: facets specified by &facet.query=
-  facet_ranges.vm         - display facets based on ranges
-                            e.g.: ranges specified by &facet.range=
-  facet_pivot.vm          - display pivot based facets
-                            e.g.: facets specified by &facet.pivot=
-  cluster.vm              - if clustering is available
-                            then call cluster_results.vm
-  cluster_results.vm      - actual rendering of clusters
-```
\ No newline at end of file
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/VM_global_library.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/VM_global_library.vm
deleted file mode 100644
index 42dde89..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/VM_global_library.vm
+++ /dev/null
@@ -1,186 +0,0 @@
-#**
- *  Global macros used by other templates.
- *  This file must be named VM_global_library.vm
- *  in order for Velocity to find it.
- *#
-
-#macro(param $key)$request.params.get($key)#end
-
-#macro(url_root)/solr#end
-
-## TODO: s/url_for_solr/url_for_core/ and s/url_root/url_for_solr/
-#macro(core_name)$request.core.name#end
-#macro(url_for_solr)#{url_root}#if($request.core.name != "")/$request.core.name#end#end
-#macro(url_for_home)#url_for_solr/browse#end
-
-#macro(q)&q=$!{esc.url($params.get('q'))}#end
-
-#macro(fqs $p)#foreach($fq in $p)#if($velocityCount && $velocityCount>1)&#{end}fq=$esc.url($fq)#end#end
-
-#macro(debug)#if($request.params.get('debugQuery'))&debugQuery=true#end#end
-
-#macro(boostPrice)#if($request.params.get('bf') == 'price')&bf=price#end#end        
-
-#macro(annotate)#if($request.params.get('annotateBrowse'))&annotateBrowse=true#end#end
-
-#macro(annTitle $msg)#if($annotate == true)title="$msg"#end#end
-
-#macro(spatial)#if($request.params.get('sfield'))&sfield=store#end#if($request.params.get('pt'))&pt=$request.params.get('pt')#end#if($request.params.get('d'))&d=$request.params.get('d')#end#end
-
-#macro(qOpts)#set($queryOpts = $request.params.get("queryOpts"))#if($queryOpts && $queryOpts != "")&queryOpts=$queryOpts#end#end
-
-#macro(group)#if($request.params.getBool("group") == true)&group=true#end#if($request.params.get("group.field"))#foreach($grp in $request.params.getParams('group.field'))&group.field=$grp#end#end#end
-
-#macro(sort $p)#if($p)#foreach($s in $p)&sort=$esc.url($s)#end#end#end
-
-#macro(lensNoQ)?#if($request.params.getParams('fq') && $request.params.getParams('fq').size() > 0)&#fqs($request.params.getParams('fq'))#end#sort($request.params.getParams('sort'))#debug#boostPrice#annotate#spatial#qOpts#group#end
-#macro(lens)#lensNoQ#q#end
-        
-
-#macro(url_for_lens)#{url_for_home}#lens#end
-
-#macro(url_for_start $start)#url_for_home#lens&start=$start#end
-
-#macro(url_for_filters $p)#url_for_home?#q#boostPrice#spatial#qOpts#if($p && $p.size() > 0)&#fqs($p)#end#debug#end
-
-#macro(url_for_nested_facet_query $field)#url_for_home#lens&fq=$esc.url($field)#end
-
-## TODO: convert to use {!raw f=$field}$value (with escaping of course)
-#macro(url_for_facet_filter $field $value)#url_for_home#lens&fq=#if($value!=$null)$esc.url($field):%22$esc.url($value)%22#else-$esc.url($field):[*+TO+*]#end#end
-
-#macro(url_for_facet_date_filter $field $value)#url_for_home#lens&fq=$esc.url($field):$esc.url($value)#end
-
-#macro(url_for_facet_range_filter $field $value)#url_for_home#lens&fq=$esc.url($field):$esc.url($value)#end
-
-
-#macro(link_to_previous_page $text)
-  #if($page.current_page_number > 1)
-    #set($prev_start = $page.start - $page.results_per_page)
-    <a class="prev-page" href="#url_for_start($prev_start)">$text</a>
-  #end
-#end
-
-#macro(link_to_next_page $text)
-  #if($page.current_page_number < $page.page_count)
-    #set($next_start = $page.start + $page.results_per_page)
-    <a class="next-page" href="#url_for_start($next_start)">$text</a>
-  #end
-#end
-
-#macro(link_to_page $page_number $text)
-  #if($page_number == $page.current_page_number)
-    $text
-  #else
-    #if($page_number <= $page.page_count)
-      #set($page_start = $page_number * $page.results_per_page - $page.results_per_page)
-      <a class="page" href="#url_for_start($page_start)">$text</a>
-    #end
-  #end
-#end
-
-#macro(display_facet_query $field, $display, $fieldName)
-  #if($field && $field.size() > 0)
-  <span class="facet-field">$display</span>
-    <ul>
-    #foreach ($facet in $field)
-      #if ($facet.value > 0)
-        #set($facetURL = "#url_for_nested_facet_query($facet.key)")
-        #if ($facetURL != '')
-          <li><a href="$facetURL">$facet.key</a> ($facet.value)</li>
-        #end
-      #end
-    #end
-    </ul>
-  #end      
-#end
-
-
-#macro(display_facet_range $field, $display, $fieldName, $start, $end, $gap, $before, $after)
-  <span class="facet-field">$display</span>
-    <ul>
-    #if($before && $before != "")
-      #set($value = "[* TO " + "#format_value($start)" + "}")
-      #set($facetURL = "#url_for_facet_range_filter($fieldName, $value)")
-      <li><a href="$facetURL">Less than #format_value($start)</a> ($before)</li>
-    #end
-    #foreach ($facet in $field)
-      #set($rangeEnd = "#range_get_to_value($facet.key, $gap)")
-      #set($value = "[" + $facet.key + " TO " + $rangeEnd + "}")
-      #set($facetURL = "#url_for_facet_range_filter($fieldName, $value)")
-      #if ($facetURL != '')
-        <li><a href="$facetURL">$facet.key - #format_value($rangeEnd)</a> ($facet.value)</li>
-      #end
-    #end
-    #if($end && $end != "" && $after > 0)
-      #set($value = "[" + "#format_value($end)" + " TO *}")
-      #set($facetURL = "#url_for_facet_range_filter($fieldName, $value)")
-      <li><a href="$facetURL">More than #format_value($end)</a> ($after)</li>
-    #end
-    </ul>
-#end
-
-## $pivots is a list of facet_pivot
-#macro(display_facet_pivot $pivots, $display)
-  #if($pivots && $pivots.size() > 0)
-  <span class="facet-field">$display</span>
-    <ul>
-      #foreach ($pivot in $pivots)
-        #foreach ($entry in $pivot.value)
-          <a href="#url_for_facet_filter($entry.field, $entry.value)">$entry.field::#if($entry.value!=$null)$entry.value#else<em>missing</em>#end</a> ($entry.count)
-          <ul>
-            #foreach($nest in $entry.pivot)
-              <li>
-                #if($nest.value != $null)
-                  <a href="#url_for_facet_filter($entry.field, $entry.value)&fq=$esc.url($nest.field):%22$esc.url($nest.value)%22">$nest.field::$nest.value</a>
-                #else
-                  <a href="#url_for_facet_filter($entry.field, $entry.value)&fq=-$esc.url($nest.field):[*+TO+*]">$nest.field::<em>missing</em></a>
-                #end
-                ($nest.count)
-              </li>
-            #end
-          </ul>
-        #end
-      #end
-    </ul>
-  #end
-#end
-
-#macro(field $f)
-  #if($response.response.highlighting.get($docId).get($f).get(0))
-    #set($pad = "")
-    #foreach($v in $response.response.highlighting.get($docId).get($f))
-$pad$v##
-      #set($pad = " ... ")
-    #end
-  #else
-    #foreach($v in $doc.getFieldValues($f))
-$v##
-    #end
-  #end
-#end  
-
-#macro(utc_date $theDate)
-$date.format("yyyy-MM-dd'T'HH:mm:ss'Z'",$theDate,$date.getLocale(),$date.getTimeZone().getTimeZone("UTC"))##
-#end
-
-#macro(format_value $val)
-#if(${val.class.name} == "java.util.Date")
-#utc_date($val)##
-#else
-$val##
-#end
-#end
-
-#macro(range_get_to_value $inval, $gapval)
-#if(${gapval.class.name} == "java.lang.String")
-#if($gapval.startsWith("+"))
-$inval$gapval## Typically date gaps start with +
-#else
-$inval+$gapval## If the gap does not start with a "+", we add it, such as for currency value
-#end
-#elseif(${gapval.class.name} == "java.lang.Float" || ${inval.class.name} == "java.lang.Float")
-$math.toDouble($math.add($inval,$gapval))##
-#else
-$math.add($inval,$gapval)##
-#end
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/browse.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/browse.vm
deleted file mode 100644
index 10ecaeb..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/browse.vm
+++ /dev/null
@@ -1,33 +0,0 @@
-#**
- *  Main entry point into the /browse templates
- *#
-
-#set($searcher = $request.searcher)
-#set($params = $request.params)
-#set($clusters = $response.response.clusters)
-#set($mltResults = $response.response.get("moreLikeThis"))
-#set($annotate = $params.get("annotateBrowse"))
-#parse('query_form.vm')
-#parse('did_you_mean.vm')
-
-<div class="navigators">
-  #parse("facets.vm")
-</div>
-
-<div class="pagination">
-  #parse("pagination_top.vm")
-</div>
-
-## Show Error Message, if any
-<div class="error">
-  #parse("error.vm")
-</div>
-
-## Render Results, actual matching docs
-<div class="results">
-  #parse("results_list.vm")
-</div>
-
-<div class="pagination">
-  #parse("pagination_bottom.vm")
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm
deleted file mode 100644
index 09885f3..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster.vm
+++ /dev/null
@@ -1,19 +0,0 @@
-#**
- *  Check if Clustering is Enabled and then
- *  call cluster_results.vm
- *#
-
-<h2 #annTitle("Clusters generated by Carrot2 using the /clustering RequestHandler")>
-  Clusters
-</h2>
-
-## Div tag has placeholder text by default
-<div id="clusters">
-  Run Solr with option -Dsolr.clustering.enabled=true to see clustered search results.
-</div>
-
-## Replace the div content *if* Carrot^2 is available
-<script type="text/javascript">
-  $('#clusters').load("#url_for_solr/clustering#lens",
-    {'wt':'velocity', 'v.template':"cluster_results"});
-</script>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm
deleted file mode 100644
index 204480d..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/cluster_results.vm
+++ /dev/null
@@ -1,31 +0,0 @@
-#**
- *  Actual rendering of Clusters
- *#
-
-## For each cluster
-#foreach ($clusters in $response.response.clusters)
-
-  #set($labels = $clusters.get('labels'))
-  #set($docs = $clusters.get('docs'))
-
-  ## This Cluster's Heading
-  <h3>
-    #foreach ($label in $labels)
-      ## Keep the following line together to prevent
-      ## a space appearing before each comma
-      $label#if( $foreach.hasNext ),#end
-    #end
-  </h3>
-
-  ## This Cluster's Documents
-  <ol>
-    ## For each doc in this cluster
-    #foreach ($cluDoc in $docs)
-      <li>
-        <a href="#url_for_home?q=id:$cluDoc">
-          $cluDoc</a>
-      </li>
-    #end
-  </ol>
-
-#end   ## end for each Cluster
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/debug.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/debug.vm
deleted file mode 100644
index 8f6d232..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/debug.vm
+++ /dev/null
@@ -1,28 +0,0 @@
-#**
- *  Show Debugging Information, if enabled
- *#
-
-#if( $params.getBool("debugQuery",false) )
-  <a href="#" onclick='jQuery(this).siblings("pre").toggle(); return false;'>
-    toggle explain</a>
-
-  <pre style="display:none">
-    $response.getExplainMap().get($doc.getFirstValue('id'))
-  </pre>
-
-  <a href="#" onclick='jQuery(this).siblings("pre2").toggle(); return false;'>
-    toggle all fields</a>
-
-  <pre2 style="display:none">
-    #foreach($fieldname in $doc.fieldNames)
-      <br>
-        <span class="field-name">$fieldname :</span>
-        <span>
-          #foreach($value in $doc.getFieldValues($fieldname))
-            $esc.html($value)
-          #end
-        </span>
-      </br>
-    #end
-  </pre2>
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/did_you_mean.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/did_you_mean.vm
deleted file mode 100644
index d0eb4f8..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/did_you_mean.vm
+++ /dev/null
@@ -1,11 +0,0 @@
-#**
- *  Hyperlinked spelling suggestions in results list
- *#
-
-#set($collations = $response.response.spellcheck.collations.getAll("collation"))
-#if($collations && $collations.size() > 0)
-  Did you mean
-  #foreach($collation in $collations)
-    <a href="#{url_for_home}#{lensNoQ}&q=$esc.url($collation.collationQuery)">$esc.html($collation.collationQuery)</a> ($collation.hits)?
-  #end
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/error.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/error.vm
deleted file mode 100644
index 80b5819..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/error.vm
+++ /dev/null
@@ -1,11 +0,0 @@
-#**
- *  Show Error Message, if any
- *#
-
-## Show Error Message, if any
-## Usually rendered inside div class=error
-
-#if( $response.response.error.code )
-  <h1>ERROR $response.response.error.code</h1>
-  $response.response.error.msg
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_fields.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_fields.vm
deleted file mode 100644
index 2926817..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_fields.vm
+++ /dev/null
@@ -1,24 +0,0 @@
-#**
- *  Display facets based on field values
- *  e.g.: fields specified by &facet.field=
- *#
-
-#if($response.facetFields)
-  <h2 #annTitle("Facets generated by adding &facet.field= to the request")>
-    Field Facets
-  </h2>
-  #foreach($field in $response.facetFields)
-    ## Hide facets without value
-    #if($field.values.size() > 0)
-      <span class="facet-field">$field.name</span>
-      <ul>
-        #foreach($facet in $field.values)
-          <li>
-            <a href="#url_for_facet_filter($field.name, $facet.name)" title="$esc.html($facet.name)">
-              #if($facet.name!=$null)$esc.html($display.truncate($facet.name,20))#else<em>missing</em>#end</a> ($facet.count)
-          </li>
-        #end
-      </ul>
-    #end  ## end if > 0
-  #end    ## end for each facet field
-#end      ## end if response has facet fields
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_pivot.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_pivot.vm
deleted file mode 100644
index 7aa50da..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_pivot.vm
+++ /dev/null
@@ -1,12 +0,0 @@
-#**
- *  Display Pivot-Based Facets
- *  e.g.: facets specified by &facet.pivot=
- *#
-
-<h2 #annTitle("Facets generated by adding &facet.pivot= to the request")>
-  Pivot Facets
-</h2>
-
-#set($pivot = $response.response.facet_counts.facet_pivot)
-
-#display_facet_pivot($pivot, "")
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_queries.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_queries.vm
deleted file mode 100644
index 37489c7..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_queries.vm
+++ /dev/null
@@ -1,12 +0,0 @@
-#**
- *  Display facets based on specific facet queries
- *  e.g.: facets specified by &facet.query=
- *#
-
-#set($field = $response.response.facet_counts.facet_queries)
-
-<h2 #annTitle("Facets generated by adding &facet.query= to the request")>
-  Query Facets
-</h2>
-
-#display_facet_query($field, "", "")
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_ranges.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_ranges.vm
deleted file mode 100644
index a769415..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facet_ranges.vm
+++ /dev/null
@@ -1,23 +0,0 @@
-#**
- *  Display facets based on ranges of values, AKA "Bukets"
- *  e.g.: ranges specified by &facet.range=
- *#
-
-<h2 #annTitle("Facets generated by adding &facet.range= to the request")>
-  Range Facets
-</h2>
-
-#foreach ($field in $response.response.facet_counts.facet_ranges)
-  ## Hide facets without value
-  #if($field.value.counts.size() > 0)
-  #set($name = $field.key)
-  #set($display = $name)
-  #set($f = $field.value.counts)
-  #set($start = $field.value.start)
-  #set($end = $field.value.end)
-  #set($gap = $field.value.gap)
-  #set($before = $field.value.before)
-  #set($after = $field.value.after)
-  #display_facet_range($f, $display, $name, $start, $end, $gap, $before, $after)
-  #end  ## end if has any values
-#end    ## end for each facet range
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm
deleted file mode 100644
index 55d40c9..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/facets.vm
+++ /dev/null
@@ -1,10 +0,0 @@
-#**
- *  Overall Facet display block
- *  Invokes the 4 facet and 1 cluster template
- *#
-
-#parse('facet_fields.vm')
-#parse('facet_queries.vm')
-#parse('facet_ranges.vm')
-#parse('facet_pivot.vm')
-#parse('cluster.vm')
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/footer.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/footer.vm
deleted file mode 100644
index e7430d3..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/footer.vm
+++ /dev/null
@@ -1,43 +0,0 @@
-#**
- *  Render the bottom section of the page visible to users
- *#
-
-<hr/>
-<div>
-  <span>Options:</span>
-
-  #if($request.params.get('debugQuery'))
-    <a href="#url_for_home?#q#if($request.params.getParams('fq').size() > 0)&#fqs($request.params.getParams('fq'))#end">
-      disable debug</a>
-  #else
-    <a href="#url_for_lens&debugQuery=true&fl=*,score">
-      enable debug</a>
-  #end
-  -
-  #if($annotate)
-    <a href="#url_for_home?#q#if($request.params.getParams('fq').size() > 0)&#fqs($request.params.getParams('fq'))#end#boostPrice">
-      disable annotation</a>
-  #else
-    <a href="#url_for_lens&annotateBrowse=true">
-      enable annotation</a>
-  #end
-  -
-  <a #annTitle("Click to switch to an XML response: &wt=xml") href="#url_for_lens&wt=xml#if($request.params.get('debugQuery'))&debugQuery=true#end">
-    XML results</a>
-
-</div>
-
-<div>
-  Generated by <a href="http://wiki.apache.org/solr/VelocityResponseWriter">VelocityResponseWriter</a>
-</div>
-<div>
-  <span>Documentation: </span>
-  <a href="http://lucene.apache.org/solr">Solr Home Page</a>, <a href="http://wiki.apache.org/solr">
-    Solr Wiki</a>
-  </div>
-<div>
-  Disclaimer:
-  The locations displayed in this demonstration are purely fictional.
-  It is more than likely that no store with the items listed actually
-  exists at that location!
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/head.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/head.vm
deleted file mode 100644
index d158113..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/head.vm
+++ /dev/null
@@ -1,37 +0,0 @@
-#**
- *  Provide elements for the <head> section of the HTML document
- *#
-
-  ## An example of using an arbitrary request parameter
-  <title>#param('title')</title>
-  <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
-
-  <script type="text/javascript" src="#{url_root}/libs/jquery-3.4.1.min.js"></script>
-  <link rel="stylesheet" type="text/css" href="#{url_for_solr}/admin/file?file=/velocity/main.css&contentType=text/css"/>
-  <link rel="stylesheet" href="#{url_for_solr}/admin/file?file=/velocity/jquery.autocomplete.css&contentType=text/css" type="text/css" />
-  <link rel="icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-  <link rel="shortcut icon" type="image/x-icon" href="#{url_root}/img/favicon.ico"/>
-  <script type="text/javascript" src="#{url_for_solr}/admin/file?file=/velocity/jquery.autocomplete.js&contentType=text/javascript"></script>
-
-
-    <script>
-    $(document).ready(function(){
-      $("\#q").autocomplete('#{url_for_solr}/terms', {  ## backslash escaped #q as that is a macro defined in VM_global_library.vm
-           extraParams:{
-             'terms.prefix': function() { return $("\#q").val();},
-             'terms.sort': 'count',
-             'terms.fl': 'name',
-             'wt': 'velocity',
-             'v.template': 'suggest'
-           }
-         }
-      ).keydown(function(e){
-        if (e.keyCode === 13){
-          $("#query-form").trigger('submit');
-        }
-      });
-
-      // http://localhost:8983/solr/collection1/terms?terms.fl=name&terms.prefix=i&terms.sort=count&wt=velocity&v.template=suggest
-    });
-
-    </script>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/header.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/header.vm
deleted file mode 100644
index a408451..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/header.vm
+++ /dev/null
@@ -1,7 +0,0 @@
-#**
- *  Render the top section of the page visible to users
- *#
-
-<div id="head">
-  <span ><a href="#url_for_home#if($request.params.get('debugQuery'))?debugQuery=true#end"><img src="#{url_root}/img/solr.svg" id="logo"/></a></span>
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit.vm
deleted file mode 100644
index a9c11f4..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit.vm
+++ /dev/null
@@ -1,25 +0,0 @@
-#**
- *  Called for each matching document but then
- *  calls one of product_doc, join_doc or richtext_doc
- *  depending on which fields the doc has
- *#
-
-#set($docId = $doc.getFieldValue('id'))
-
-<div class="result-document">
-
-  ## Has a "name" field ?
-  #if($doc.getFieldValue('name'))
-    #parse("product_doc.vm")
-
-  ## Has a "compName_s" field ?
-  #elseif($doc.getFieldValue('compName_s'))
-    #parse("join_doc.vm")
-
-  ## Fallback to richtext_doc
-  #else
-    #parse("richtext_doc.vm")
-
-  #end
-
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_grouped.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_grouped.vm
deleted file mode 100644
index 5297f1e..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_grouped.vm
+++ /dev/null
@@ -1,43 +0,0 @@
-#**
- *  Display grouped results
- *#
-
-<div class="result-document">
-
-  <div class="result-title">
-    <b>$grouping.key</b>
-  </div>
-
-  <div>
-    Total Matches in Group: $grouping.value.matches
-  </div>
-
-  <div>  ## list of groups
-
-    #foreach ($group in $grouping.value.groups)
-      <div class="group-value">
-        #if($group.groupValue)$group.groupValue#{else}<i>No group</i>#end
-        <span #annTitle("The count of the number of documents in this group")>
-          ($group.doclist.numFound)
-        </span>
-      </div>
-
-      <div class="group-doclist"
-        #annTitle("Contains the top scoring documents in the group")
-      >
-        #foreach ($doc in $group.doclist)
-          #set($docId = $doc.getFieldValue('id'))
-          #if($doc.getFieldValue('name'))
-            #parse("product_doc.vm")
-          #elseif($doc.getFieldValue('compName_s'))
-            #parse("join_doc.vm")
-          #else
-            #parse("richtext_doc.vm")
-          #end
-        #end
-      </div>
-
-    #end  ## end of foreach group in grouping.value.groups
-  </div>  ## div tag for entire list of groups
-
-</div>  ## end of div class=result-document
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_plain.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_plain.vm
deleted file mode 100644
index 193439b..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/hit_plain.vm
+++ /dev/null
@@ -1,25 +0,0 @@
-#**
- *  An extremely plain / debug version of hit.vm
- *#
-
-<table>
-  ## For each field
-  #foreach( $fieldName in $doc.fieldNames )
-    ## For each value
-    #foreach( $value in $doc.getFieldValues($fieldName) )
-      <tr>
-        ## Field Name
-        <th align="right" valign="top">
-          #if( $foreach.count == 1 )
-            $fieldName:
-          #end
-        </th>
-        ## Field Value(s)
-        <td align="left" valign="top">
-          $esc.html($value) <br/>
-        </td>
-      </tr>
-    #end     ## end for each value
-  #end       ## end for each field
-</table>
-<hr/>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/join_doc.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/join_doc.vm
deleted file mode 100644
index 9956012..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/join_doc.vm
+++ /dev/null
@@ -1,20 +0,0 @@
-#**
- *  Display documents that are joined to other documents
- *#
-
-<div class="result-title">
-  <b>#field('compName_s')</b>
-</div>
-
-<div>
-  Id: #field('id')
-  (company-details document for
-    <a href="http://wiki.apache.org/solr/Join" target="_new">join</a>
-  )
-</div>
-
-<div>
-  Address: #field('address_s')
-</div>
-
-#parse('debug.vm')
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.css b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.css
deleted file mode 100644
index 97a62e0..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.css
+++ /dev/null
@@ -1,48 +0,0 @@
-.ac_results {
-  padding: 0px;
-  border: 1px solid black;
-  background-color: white;
-  overflow: hidden;
-  z-index: 99999;
-}
-
-.ac_results ul {
-  width: 100%;
-  list-style-position: outside;
-  list-style: none;
-  padding: 0;
-  margin: 0;
-}
-
-.ac_results li {
-  margin: 0px;
-  padding: 2px 5px;
-  cursor: default;
-  display: block;
-  /* 
-  if width will be 100% horizontal scrollbar will apear 
-  when scroll mode will be used
-  */
-  /*width: 100%;*/
-  font: menu;
-  font-size: 12px;
-  /* 
-  it is very important, if line-height not setted or setted 
-  in relative units scroll will be broken in firefox
-  */
-  line-height: 16px;
-  overflow: hidden;
-}
-
-.ac_loading {
-  background: white url('indicator.gif') right center no-repeat;
-}
-
-.ac_odd {
-  background-color: #eee;
-}
-
-.ac_over {
-  background-color: #0A246A;
-  color: white;
-}
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.js b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.js
deleted file mode 100644
index 442f5a0..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/jquery.autocomplete.js
+++ /dev/null
@@ -1,763 +0,0 @@
-/*
- * Autocomplete - jQuery plugin 1.1pre
- *
- * Copyright (c) 2007 Dylan Verheul, Dan G. Switzer, Anjesh Tuladhar, Jörn Zaefferer
- *
- * Dual licensed under the MIT and GPL licenses:
- *   http://www.opensource.org/licenses/mit-license.php
- *   http://www.gnu.org/licenses/gpl.html
- *
- * Revision: Id: jquery.autocomplete.js 5785 2008-07-12 10:37:33Z joern.zaefferer $
- *
- */
-
-;(function($) {
-  
-$.fn.extend({
-  autocomplete: function(urlOrData, options) {
-    var isUrl = typeof urlOrData == "string";
-    options = $.extend({}, $.Autocompleter.defaults, {
-      url: isUrl ? urlOrData : null,
-      data: isUrl ? null : urlOrData,
-      delay: isUrl ? $.Autocompleter.defaults.delay : 10,
-      max: options && !options.scroll ? 10 : 150
-    }, options);
-    
-    // if highlight is set to false, replace it with a do-nothing function
-    options.highlight = options.highlight || function(value) { return value; };
-    
-    // if the formatMatch option is not specified, then use formatItem for backwards compatibility
-    options.formatMatch = options.formatMatch || options.formatItem;
-    
-    return this.each(function() {
-      new $.Autocompleter(this, options);
-    });
-  },
-  result: function(handler) {
-    return this.bind("result", handler);
-  },
-  search: function(handler) {
-    return this.trigger("search", [handler]);
-  },
-  flushCache: function() {
-    return this.trigger("flushCache");
-  },
-  setOptions: function(options){
-    return this.trigger("setOptions", [options]);
-  },
-  unautocomplete: function() {
-    return this.trigger("unautocomplete");
-  }
-});
-
-$.Autocompleter = function(input, options) {
-
-  var KEY = {
-    UP: 38,
-    DOWN: 40,
-    DEL: 46,
-    TAB: 9,
-    RETURN: 13,
-    ESC: 27,
-    COMMA: 188,
-    PAGEUP: 33,
-    PAGEDOWN: 34,
-    BACKSPACE: 8
-  };
-
-  // Create $ object for input element
-  var $input = $(input).attr("autocomplete", "off").addClass(options.inputClass);
-
-  var timeout;
-  var previousValue = "";
-  var cache = $.Autocompleter.Cache(options);
-  var hasFocus = 0;
-  var lastKeyPressCode;
-  var config = {
-    mouseDownOnSelect: false
-  };
-  var select = $.Autocompleter.Select(options, input, selectCurrent, config);
-  
-  var blockSubmit;
-  
-  // prevent form submit in opera when selecting with return key
-  $.browser.opera && $(input.form).bind("submit.autocomplete", function() {
-    if (blockSubmit) {
-      blockSubmit = false;
-      return false;
-    }
-  });
-  
-  // only opera doesn't trigger keydown multiple times while pressed, others don't work with keypress at all
-  $input.bind(($.browser.opera ? "keypress" : "keydown") + ".autocomplete", function(event) {
-    // track last key pressed
-    lastKeyPressCode = event.keyCode;
-    switch(event.keyCode) {
-    
-      case KEY.UP:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.prev();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.DOWN:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.next();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.PAGEUP:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.pageUp();
-        } else {
-          onChange(0, true);
-        }
-        break;
-        
-      case KEY.PAGEDOWN:
-        event.preventDefault();
-        if ( select.visible() ) {
-          select.pageDown();
-        } else {
-          onChange(0, true);
-        }
-        break;
-      
-      // matches also semicolon
-      case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA:
-      case KEY.TAB:
-      case KEY.RETURN:
-        if( selectCurrent() ) {
-          // stop default to prevent a form submit, Opera needs special handling
-          event.preventDefault();
-          blockSubmit = true;
-          return false;
-        }
-        break;
-        
-      case KEY.ESC:
-        select.hide();
-        break;
-        
-      default:
-        clearTimeout(timeout);
-        timeout = setTimeout(onChange, options.delay);
-        break;
-    }
-  }).focus(function(){
-    // track whether the field has focus, we shouldn't process any
-    // results if the field no longer has focus
-    hasFocus++;
-  }).blur(function() {
-    hasFocus = 0;
-    if (!config.mouseDownOnSelect) {
-      hideResults();
-    }
-  }).click(function() {
-    // show select when clicking in a focused field
-    if ( hasFocus++ > 1 && !select.visible() ) {
-      onChange(0, true);
-    }
-  }).bind("search", function() {
-    // TODO why not just specifying both arguments?
-    var fn = (arguments.length > 1) ? arguments[1] : null;
-    function findValueCallback(q, data) {
-      var result;
-      if( data && data.length ) {
-        for (var i=0; i < data.length; i++) {
-          if( data[i].result.toLowerCase() == q.toLowerCase() ) {
-            result = data[i];
-            break;
-          }
-        }
-      }
-      if( typeof fn == "function" ) fn(result);
-      else $input.trigger("result", result && [result.data, result.value]);
-    }
-    $.each(trimWords($input.val()), function(i, value) {
-      request(value, findValueCallback, findValueCallback);
-    });
-  }).bind("flushCache", function() {
-    cache.flush();
-  }).bind("setOptions", function() {
-    $.extend(options, arguments[1]);
-    // if we've updated the data, repopulate
-    if ( "data" in arguments[1] )
-      cache.populate();
-  }).bind("unautocomplete", function() {
-    select.unbind();
-    $input.unbind();
-    $(input.form).unbind(".autocomplete");
-  });
-  
-  
-  function selectCurrent() {
-    var selected = select.selected();
-    if( !selected )
-      return false;
-    
-    var v = selected.result;
-    previousValue = v;
-    
-    if ( options.multiple ) {
-      var words = trimWords($input.val());
-      if ( words.length > 1 ) {
-        v = words.slice(0, words.length - 1).join( options.multipleSeparator ) + options.multipleSeparator + v;
-      }
-      v += options.multipleSeparator;
-    }
-    
-    $input.val(v);
-    hideResultsNow();
-    $input.trigger("result", [selected.data, selected.value]);
-    return true;
-  }
-  
-  function onChange(crap, skipPrevCheck) {
-    if( lastKeyPressCode == KEY.DEL ) {
-      select.hide();
-      return;
-    }
-    
-    var currentValue = $input.val();
-    
-    if ( !skipPrevCheck && currentValue == previousValue )
-      return;
-    
-    previousValue = currentValue;
-    
-    currentValue = lastWord(currentValue);
-    if ( currentValue.length >= options.minChars) {
-      $input.addClass(options.loadingClass);
-      if (!options.matchCase)
-        currentValue = currentValue.toLowerCase();
-      request(currentValue, receiveData, hideResultsNow);
-    } else {
-      stopLoading();
-      select.hide();
-    }
-  };
-  
-  function trimWords(value) {
-    if ( !value ) {
-      return [""];
-    }
-    var words = value.split( options.multipleSeparator );
-    var result = [];
-    $.each(words, function(i, value) {
-      if ( $.trim(value) )
-        result[i] = $.trim(value);
-    });
-    return result;
-  }
-  
-  function lastWord(value) {
-    if ( !options.multiple )
-      return value;
-    var words = trimWords(value);
-    return words[words.length - 1];
-  }
-  
-  // fills in the input box w/the first match (assumed to be the best match)
-  // q: the term entered
-  // sValue: the first matching result
-  function autoFill(q, sValue){
-    // autofill in the complete box w/the first match as long as the user hasn't entered in more data
-    // if the last user key pressed was backspace, don't autofill
-    if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
-      // fill in the value (keep the case the user has typed)
-      $input.val($input.val() + sValue.substring(lastWord(previousValue).length));
-      // select the portion of the value not typed by the user (so the next character will erase)
-      $.Autocompleter.Selection(input, previousValue.length, previousValue.length + sValue.length);
-    }
-  };
-
-  function hideResults() {
-    clearTimeout(timeout);
-    timeout = setTimeout(hideResultsNow, 200);
-  };
-
-  function hideResultsNow() {
-    var wasVisible = select.visible();
-    select.hide();
-    clearTimeout(timeout);
-    stopLoading();
-    if (options.mustMatch) {
-      // call search and run callback
-      $input.search(
-        function (result){
-          // if no value found, clear the input box
-          if( !result ) {
-            if (options.multiple) {
-              var words = trimWords($input.val()).slice(0, -1);
-              $input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
-            }
-            else
-              $input.val( "" );
-          }
-        }
-      );
-    }
-    if (wasVisible)
-      // position cursor at end of input field
-      $.Autocompleter.Selection(input, input.value.length, input.value.length);
-  };
-
-  function receiveData(q, data) {
-    if ( data && data.length && hasFocus ) {
-      stopLoading();
-      select.display(data, q);
-      autoFill(q, data[0].value);
-      select.show();
-    } else {
-      hideResultsNow();
-    }
-  };
-
-  function request(term, success, failure) {
-    if (!options.matchCase)
-      term = term.toLowerCase();
-    var data = cache.load(term);
-    data = null; // Avoid buggy cache and go to Solr every time 
-    // recieve the cached data
-    if (data && data.length) {
-      success(term, data);
-    // if an AJAX url has been supplied, try loading the data now
-    } else if( (typeof options.url == "string") && (options.url.length > 0) ){
-      
-      var extraParams = {
-        timestamp: +new Date()
-      };
-      $.each(options.extraParams, function(key, param) {
-        extraParams[key] = typeof param == "function" ? param() : param;
-      });
-      
-      $.ajax({
-        // try to leverage ajaxQueue plugin to abort previous requests
-        mode: "abort",
-        // limit abortion to this input
-        port: "autocomplete" + input.name,
-        dataType: options.dataType,
-        url: options.url,
-        data: $.extend({
-          q: lastWord(term),
-          limit: options.max
-        }, extraParams),
-        success: function(data) {
-          var parsed = options.parse && options.parse(data) || parse(data);
-          cache.add(term, parsed);
-          success(term, parsed);
-        }
-      });
-    } else {
-      // if we have a failure, we need to empty the list -- this prevents the the [TAB] key from selecting the last successful match
-      select.emptyList();
-      failure(term);
-    }
-  };
-  
-  function parse(data) {
-    var parsed = [];
-    var rows = data.split("\n");
-    for (var i=0; i < rows.length; i++) {
-      var row = $.trim(rows[i]);
-      if (row) {
-        row = row.split("|");
-        parsed[parsed.length] = {
-          data: row,
-          value: row[0],
-          result: options.formatResult && options.formatResult(row, row[0]) || row[0]
-        };
-      }
-    }
-    return parsed;
-  };
-
-  function stopLoading() {
-    $input.removeClass(options.loadingClass);
-  };
-
-};
-
-$.Autocompleter.defaults = {
-  inputClass: "ac_input",
-  resultsClass: "ac_results",
-  loadingClass: "ac_loading",
-  minChars: 1,
-  delay: 400,
-  matchCase: false,
-  matchSubset: true,
-  matchContains: false,
-  cacheLength: 10,
-  max: 100,
-  mustMatch: false,
-  extraParams: {},
-  selectFirst: false,
-  formatItem: function(row) { return row[0]; },
-  formatMatch: null,
-  autoFill: false,
-  width: 0,
-  multiple: false,
-  multipleSeparator: ", ",
-  highlight: function(value, term) {
-    return value.replace(new RegExp("(?![^&;]+;)(?!<[^<>]*)(" + term.replace(/([\^\$\(\)\[\]\{\}\*\.\+\?\|\\])/gi, "\\$1") + ")(?![^<>]*>)(?![^&;]+;)", "gi"), "<strong>$1</strong>");
-  },
-    scroll: true,
-    scrollHeight: 180
-};
-
-$.Autocompleter.Cache = function(options) {
-
-  var data = {};
-  var length = 0;
-  
-  function matchSubset(s, sub) {
-    if (!options.matchCase) 
-      s = s.toLowerCase();
-    var i = s.indexOf(sub);
-    if (options.matchContains == "word"){
-      i = s.toLowerCase().search("\\b" + sub.toLowerCase());
-    }
-    if (i == -1) return false;
-    return i == 0 || options.matchContains;
-  };
-  
-  function add(q, value) {
-    if (length > options.cacheLength){
-      flush();
-    }
-    if (!data[q]){ 
-      length++;
-    }
-    data[q] = value;
-  }
-  
-  function populate(){
-    if( !options.data ) return false;
-    // track the matches
-    var stMatchSets = {},
-      nullData = 0;
-
-    // no url was specified, we need to adjust the cache length to make sure it fits the local data store
-    if( !options.url ) options.cacheLength = 1;
-    
-    // track all options for minChars = 0
-    stMatchSets[""] = [];
-    
-    // loop through the array and create a lookup structure
-    for ( var i = 0, ol = options.data.length; i < ol; i++ ) {
-      var rawValue = options.data[i];
-      // if rawValue is a string, make an array otherwise just reference the array
-      rawValue = (typeof rawValue == "string") ? [rawValue] : rawValue;
-      
-      var value = options.formatMatch(rawValue, i+1, options.data.length);
-      if ( value === false )
-        continue;
-        
-      var firstChar = value.charAt(0).toLowerCase();
-      // if no lookup array for this character exists, look it up now
-      if( !stMatchSets[firstChar] ) 
-        stMatchSets[firstChar] = [];
-
-      // if the match is a string
-      var row = {
-        value: value,
-        data: rawValue,
-        result: options.formatResult && options.formatResult(rawValue) || value
-      };
-      
-      // push the current match into the set list
-      stMatchSets[firstChar].push(row);
-
-      // keep track of minChars zero items
-      if ( nullData++ < options.max ) {
-        stMatchSets[""].push(row);
-      }
-    };
-
-    // add the data items to the cache
-    $.each(stMatchSets, function(i, value) {
-      // increase the cache size
-      options.cacheLength++;
-      // add to the cache
-      add(i, value);
-    });
-  }
-  
-  // populate any existing data
-  setTimeout(populate, 25);
-  
-  function flush(){
-    data = {};
-    length = 0;
-  }
-  
-  return {
-    flush: flush,
-    add: add,
-    populate: populate,
-    load: function(q) {
-      if (!options.cacheLength || !length)
-        return null;
-      /* 
-       * if dealing w/local data and matchContains than we must make sure
-       * to loop through all the data collections looking for matches
-       */
-      if( !options.url && options.matchContains ){
-        // track all matches
-        var csub = [];
-        // loop through all the data grids for matches
-        for( var k in data ){
-          // don't search through the stMatchSets[""] (minChars: 0) cache
-          // this prevents duplicates
-          if( k.length > 0 ){
-            var c = data[k];
-            $.each(c, function(i, x) {
-              // if we've got a match, add it to the array
-              if (matchSubset(x.value, q)) {
-                csub.push(x);
-              }
-            });
-          }
-        }        
-        return csub;
-      } else 
-      // if the exact item exists, use it
-      if (data[q]){
-        return data[q];
-      } else
-      if (options.matchSubset) {
-        for (var i = q.length - 1; i >= options.minChars; i--) {
-          var c = data[q.substr(0, i)];
-          if (c) {
-            var csub = [];
-            $.each(c, function(i, x) {
-              if (matchSubset(x.value, q)) {
-                csub[csub.length] = x;
-              }
-            });
-            return csub;
-          }
-        }
-      }
-      return null;
-    }
-  };
-};
-
-$.Autocompleter.Select = function (options, input, select, config) {
-  var CLASSES = {
-    ACTIVE: "ac_over"
-  };
-  
-  var listItems,
-    active = -1,
-    data,
-    term = "",
-    needsInit = true,
-    element,
-    list;
-  
-  // Create results
-  function init() {
-    if (!needsInit)
-      return;
-    element = $("<div/>")
-    .hide()
-    .addClass(options.resultsClass)
-    .css("position", "absolute")
-    .appendTo(document.body);
-  
-    list = $("<ul/>").appendTo(element).mouseover( function(event) {
-      if(target(event).nodeName && target(event).nodeName.toUpperCase() == 'LI') {
-              active = $("li", list).removeClass(CLASSES.ACTIVE).index(target(event));
-          $(target(event)).addClass(CLASSES.ACTIVE);            
-          }
-    }).click(function(event) {
-      $(target(event)).addClass(CLASSES.ACTIVE);
-      select();
-      // TODO provide option to avoid setting focus again after selection? useful for cleanup-on-focus
-      input.focus();
-      return false;
-    }).mousedown(function() {
-      config.mouseDownOnSelect = true;
-    }).mouseup(function() {
-      config.mouseDownOnSelect = false;
-    });
-    
-    if( options.width > 0 )
-      element.css("width", options.width);
-      
-    needsInit = false;
-  } 
-  
-  function target(event) {
-    var element = event.target;
-    while(element && element.tagName != "LI")
-      element = element.parentNode;
-    // more fun with IE, sometimes event.target is empty, just ignore it then
-    if(!element)
-      return [];
-    return element;
-  }
-
-  function moveSelect(step) {
-    listItems.slice(active, active + 1).removeClass(CLASSES.ACTIVE);
-    movePosition(step);
-        var activeItem = listItems.slice(active, active + 1).addClass(CLASSES.ACTIVE);
-        if(options.scroll) {
-            var offset = 0;
-            listItems.slice(0, active).each(function() {
-        offset += this.offsetHeight;
-      });
-            if((offset + activeItem[0].offsetHeight - list.scrollTop()) > list[0].clientHeight) {
-                list.scrollTop(offset + activeItem[0].offsetHeight - list.innerHeight());
-            } else if(offset < list.scrollTop()) {
-                list.scrollTop(offset);
-            }
-        }
-  };
-  
-  function movePosition(step) {
-    active += step;
-    if (active < 0) {
-      active = listItems.size() - 1;
-    } else if (active >= listItems.size()) {
-      active = 0;
-    }
-  }
-  
-  function limitNumberOfItems(available) {
-    return options.max && options.max < available
-      ? options.max
-      : available;
-  }
-  
-  function fillList() {
-    list.empty();
-    var max = limitNumberOfItems(data.length);
-    for (var i=0; i < max; i++) {
-      if (!data[i])
-        continue;
-      var formatted = options.formatItem(data[i].data, i+1, max, data[i].value, term);
-      if ( formatted === false )
-        continue;
-      var li = $("<li/>").html( options.highlight(formatted, term) ).addClass(i%2 == 0 ? "ac_even" : "ac_odd").appendTo(list)[0];
-      $.data(li, "ac_data", data[i]);
-    }
-    listItems = list.find("li");
-    if ( options.selectFirst ) {
-      listItems.slice(0, 1).addClass(CLASSES.ACTIVE);
-      active = 0;
-    }
-    // apply bgiframe if available
-    if ( $.fn.bgiframe )
-      list.bgiframe();
-  }
-  
-  return {
-    display: function(d, q) {
-      init();
-      data = d;
-      term = q;
-      fillList();
-    },
-    next: function() {
-      moveSelect(1);
-    },
-    prev: function() {
-      moveSelect(-1);
-    },
-    pageUp: function() {
-      if (active != 0 && active - 8 < 0) {
-        moveSelect( -active );
-      } else {
-        moveSelect(-8);
-      }
-    },
-    pageDown: function() {
-      if (active != listItems.size() - 1 && active + 8 > listItems.size()) {
-        moveSelect( listItems.size() - 1 - active );
-      } else {
-        moveSelect(8);
-      }
-    },
-    hide: function() {
-      element && element.hide();
-      listItems && listItems.removeClass(CLASSES.ACTIVE);
-      active = -1;
-    },
-    visible : function() {
-      return element && element.is(":visible");
-    },
-    current: function() {
-      return this.visible() && (listItems.filter("." + CLASSES.ACTIVE)[0] || options.selectFirst && listItems[0]);
-    },
-    show: function() {
-      var offset = $(input).offset();
-      element.css({
-        width: typeof options.width == "string" || options.width > 0 ? options.width : $(input).width(),
-        top: offset.top + input.offsetHeight,
-        left: offset.left
-      }).show();
-            if(options.scroll) {
-                list.scrollTop(0);
-                list.css({
-          maxHeight: options.scrollHeight,
-          overflow: 'auto'
-        });
-        
-                if($.browser.msie && typeof document.body.style.maxHeight === "undefined") {
-          var listHeight = 0;
-          listItems.each(function() {
-            listHeight += this.offsetHeight;
-          });
-          var scrollbarsVisible = listHeight > options.scrollHeight;
-                    list.css('height', scrollbarsVisible ? options.scrollHeight : listHeight );
-          if (!scrollbarsVisible) {
-            // IE doesn't recalculate width when scrollbar disappears
-            listItems.width( list.width() - parseInt(listItems.css("padding-left")) - parseInt(listItems.css("padding-right")) );
-          }
-                }
-                
-            }
-    },
-    selected: function() {
-      var selected = listItems && listItems.filter("." + CLASSES.ACTIVE).removeClass(CLASSES.ACTIVE);
-      return selected && selected.length && $.data(selected[0], "ac_data");
-    },
-    emptyList: function (){
-      list && list.empty();
-    },
-    unbind: function() {
-      element && element.remove();
-    }
-  };
-};
-
-$.Autocompleter.Selection = function(field, start, end) {
-  if( field.createTextRange ){
-    var selRange = field.createTextRange();
-    selRange.collapse(true);
-    selRange.moveStart("character", start);
-    selRange.moveEnd("character", end);
-    selRange.select();
-  } else if( field.setSelectionRange ){
-    field.setSelectionRange(start, end);
-  } else {
-    if( field.selectionStart ){
-      field.selectionStart = start;
-      field.selectionEnd = end;
-    }
-  }
-  field.focus();
-};
-
-})(jQuery);
\ No newline at end of file
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/layout.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/layout.vm
deleted file mode 100644
index 50f4c1b..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/layout.vm
+++ /dev/null
@@ -1,24 +0,0 @@
-#**
- *  Overall HTML page layout
- *#
-
-<html>
-<head>
-  #parse("head.vm")
-</head>
-  <body>
-    <div id="admin"><a href="#url_root/#/#core_name">Solr Admin</a></div>
-    <div id="header">
-      #parse("header.vm")
-    </div>
-    <div id="tabs">
-      #parse("tabs.vm")
-    </div>
-    <div id="content">
-      $content
-    </div>
-    <div id="footer">
-      #parse("footer.vm")
-    </div>
-  </body>
-</html>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/main.css b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/main.css
deleted file mode 100644
index 67278fb..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/main.css
+++ /dev/null
@@ -1,231 +0,0 @@
-#admin{
-  text-align: right;
-  vertical-align: top; 
-}
-
-#head{
-  width: 100%;
-}
-.array-field {
-  border: 2px solid #474747;
-  background: #FFE9D8;
-  padding: 5px;
-  margin: 5px;
-}
-
-.array-field-list li {
-  list-style: circle;
-  margin-left: 20px;
-}
-
-.parsed_query_header {
-  font-family: Helvetica, Arial, sans-serif;
-  font-size: 10pt;
-  font-weight: bold;
-}
-
-.parsed_query {
-  font-family: Courier, Courier New, monospaced;
-  font-size: 10pt;
-  font-weight: normal;
-}
-
-body {
-  font-family: Helvetica, Arial, sans-serif;
-  font-size: 10pt;
-}
-
-a {
-  color: #43a4b1;
-}
-
-.navigators {
-  float: left;
-  margin: 5px;
-  margin-top: 0px;
-  width: 185px;
-  padding: 5px;
-  top: -20px;
-  position: relative;  
-}
-
-.tabs-bar {
-  padding: 5px;
-  width: 100%;
-  border: 1px solid;
-  border-width: 0px 0px 1px 0px;
-}
-.tab {
-  font-weight: bold;
-  padding: 5px;
-  margin: 0px 5px;
-  border: 1px solid;
-  background-color: #dddddd;
-  border-top-left-radius: 4px;
-  border-top-right-radius: 4px;
-}
-.tab:hover {
-  background: #FEC293;
-}
-.tab.selected {
-  background-color: #ffffff;
-  border-bottom: 1px solid #ffffff;
-}
-
-.navigators h2 {
-  background: #FEC293;
-  padding: 2px 5px;
-}
-
-.navigators ul {
-  list-style: none;
-  margin: 0;
-  margin-bottom: 5px;
-  margin-top: 5px;
-  padding-left: 10px;
-}
-
-.navigators ul li {
-  color: #999;
-  padding: 2px;
-}
-
-
-
-.facet-field {
-  font-weight: bold;
-}
-
-.highlight {
-  color: white;
-  background-color: gray;
-  border: 1px black solid;
-}
-
-.highlight-box {
-  margin-left: 15px;
-}
-
-.field-name {
-  font-weight: bold;
-}
-
-.highlighted-facet-field {
-  background: white;
-}
-
-.constraints {
-  margin-top: 10px;
-}
-
-#query-form{
-  width: 80%;
-}
-
-
-
-.query-box, .constraints {
-  padding: 5px;
-  margin: 5px;
-  font-weight: normal;
-  font-size: 24px;
-  letter-spacing: 0.08em;
-}
-
-.query-box #q {
-  margin-left: 8px;
-  width: 60%;
-  height: 50px;
-  border: 1px solid #999;
-  font-size: 1em;
-  padding: 0.4em;
-}
-
-.query-box {
-  
-}
-
-.query-boost {
-  
-  top: 10px;
-  left: 50px;
-  position: relative;
-  font-size: 0.8em;
-}
-
-.query-box .inputs{
-  left: 180px;
-  position: relative;
-  
-}
-
-#logo {
-  width: 115px;
-  margin: 0px 0px 20px 12px;
-  border-style: none;
-}
-
-.pagination {
-  padding-left: 33%;
-  background: #eee;
-  margin: 5px;
-  margin-left: 210px;
-  padding-top: 5px;
-  padding-bottom: 5px;
-}
-
-.result-document {
-  border: 1px solid #999;
-  padding: 5px;
-  margin: 5px;
-  margin-left: 210px;
-  margin-bottom: 15px;
-}
-
-.result-document div{
-  padding: 5px;
-}
-
-.result-title{
-  width:60%;
-}
-
-.result-body{
-  background: #ddd;
-}
-
-.mlt{
-  
-}
-
-.map{
-  float: right;
-  position: relative;
-  top: -25px;  
-}
-
-.result-document:nth-child(2n+1) {
-  background-color: #eee;
-}
-
-
-.selected-facet-field {
-  font-weight: bold;
-}
-
-li.show {
-  list-style: disc;
-}
-
-.group-value{
-  font-weight: bold;
-}
-
-.error {
-  color: white;
-  background-color: red;
-  left: 210px;
-  width:80%;
-  position: relative;
-
-}
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/mime_type_lists.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/mime_type_lists.vm
deleted file mode 100644
index 1468bbd..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/mime_type_lists.vm
+++ /dev/null
@@ -1,68 +0,0 @@
-#**
- *  Define some Mime-Types, short and long form
- *#
-
-## MimeType to extension map for detecting file type
-## and showing proper icon
-## List of types match the icons in /solr/img/filetypes
-
-## Short MimeType Names
-## Was called $supportedtypes
-#set($supportedMimeTypes = "7z;ai;aiff;asc;audio;bin;bz2;c;cfc;cfm;chm;class;conf;cpp;cs;css;csv;deb;divx;doc;dot;eml;enc;file;gif;gz;hlp;htm;html;image;iso;jar;java;jpeg;jpg;js;lua;m;mm;mov;mp3;mpg;odc;odf;odg;odi;odp;ods;odt;ogg;pdf;pgp;php;pl;png;ppt;ps;py;ram;rar;rb;rm;rpm;rtf;sig;sql;swf;sxc;sxd;sxi;sxw;tar;tex;tgz;txt;vcf;video;vsd;wav;wma;wmv;xls;xml;xpi;xvid;zip")
-
-## Long Form: map MimeType headers to our Short names
-## Was called $extMap
-#set( $mimeExtensionsMap = {
-   "application/x-7z-compressed": "7z",
-   "application/postscript": "ai",
-   "application/pgp-signature": "asc",
-   "application/octet-stream": "bin",
-   "application/x-bzip2": "bz2",
-   "text/x-c": "c",
-   "application/vnd.ms-htmlhelp": "chm",
-   "application/java-vm": "class",
-   "text/css": "css",
-   "text/csv": "csv",
-   "application/x-debian-package": "deb",
-   "application/msword": "doc",
-   "message/rfc822": "eml",
-   "image/gif": "gif",
-   "application/winhlp": "hlp",
-   "text/html": "html",
-   "application/java-archive": "jar",
-   "text/x-java-source": "java",
-   "image/jpeg": "jpeg",
-   "application/javascript": "js",
-   "application/vnd.oasis.opendocument.chart": "odc",
-   "application/vnd.oasis.opendocument.formula": "odf",
-   "application/vnd.oasis.opendocument.graphics": "odg",
-   "application/vnd.oasis.opendocument.image": "odi",
-   "application/vnd.oasis.opendocument.presentation": "odp",
-   "application/vnd.oasis.opendocument.spreadsheet": "ods",
-   "application/vnd.oasis.opendocument.text": "odt",
-   "application/pdf": "pdf",
-   "application/pgp-encrypted": "pgp",
-   "image/png": "png",
-   "application/vnd.ms-powerpoint": "ppt",
-   "audio/x-pn-realaudio": "ram",
-   "application/x-rar-compressed": "rar",
-   "application/vnd.rn-realmedia": "rm",
-   "application/rtf": "rtf",
-   "application/x-shockwave-flash": "swf",
-   "application/vnd.sun.xml.calc": "sxc",
-   "application/vnd.sun.xml.draw": "sxd",
-   "application/vnd.sun.xml.impress": "sxi",
-   "application/vnd.sun.xml.writer": "sxw",
-   "application/x-tar": "tar",
-   "application/x-tex": "tex",
-   "text/plain": "txt",
-   "text/x-vcard": "vcf",
-   "application/vnd.visio": "vsd",
-   "audio/x-wav": "wav",
-   "audio/x-ms-wma": "wma",
-   "video/x-ms-wmv": "wmv",
-   "application/vnd.ms-excel": "xls",
-   "application/xml": "xml",
-   "application/x-xpinstall": "xpi",
-   "application/zip": "zip"
-})
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_bottom.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_bottom.vm
deleted file mode 100644
index 71b8bdf..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_bottom.vm
+++ /dev/null
@@ -1,22 +0,0 @@
-#**
- *  Paging and Statistics at bottom of results
- *#
-
-## Usually rendered in pagination div tag
-
-#if($response.response.get('grouped'))
-  ## pass
-#else
-
-  #link_to_previous_page("previous")
-
-  <span class="results-found">$page.results_found</span>
-  results found.
-
-  Page <span class="page-num">$page.current_page_number</span>
-    of <span class="page-count">$page.page_count</span>
-
-  #link_to_next_page("next")
-
-#end
-<br/>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_top.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_top.vm
deleted file mode 100644
index e0ac8ac..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/pagination_top.vm
+++ /dev/null
@@ -1,29 +0,0 @@
-#**
- *  Paging and Statistics at top of results
- *#
-
-## Usually rendered in pagination div tag
-
-## Grouped Results / Not Paginated
-#if($response.response.get('grouped'))
-
-  <span>
-    <span class="results-found">
-      $response.response.get('grouped').size() group(s)
-    </span>
-    found in ${response.responseHeader.QTime} ms
-  </span>
-
-## Regular Results / Use Paging Links if needed
-#else
-
-  <span>
-    <span class="results-found">$page.results_found</span>
-    results found in
-    ${response.responseHeader.QTime} ms
-  </span>
-
-  Page <span class="page-num">$page.current_page_number</span>
-    of <span class="page-count">$page.page_count</span>
-
-#end   ## end else non-grouped results, normal pagination
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/product_doc.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/product_doc.vm
deleted file mode 100644
index c878d8c..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/product_doc.vm
+++ /dev/null
@@ -1,32 +0,0 @@
-#**
- *  Render a hit representing a Product
- *  assumed to have a field called "name"
- *#
-
-<div class="result-title"><b>#field('name')</b><span class="mlt">   #if($params.getBool('mlt', false) == false)<a href="#lensNoQ&q=id:$docId&mlt=true">More Like This</a>#end</span></div>
-##do we have a physical store for this product
-#set($store = $doc.getFieldValue('store'))
-#if($store)<div class="map"><img src="http://maps.google.com/maps/api/staticmap?&zoom=12&size=150x80&maptype=roadmap&markers=$doc.getFieldValue('store')&sensor=false" /><div><small><a target="_map" href="http://maps.google.com/?q=$store&amp;source=embed">Larger Map</a></small></div></div>#end
-<div>Id: #field('id')</div>
-<div>Price: #field('price_c')</div>
-<div>Features: #field('features')</div>
-<div>In Stock: #field('inStock')</div>
-<div class="mlt">
-  #set($mlt = $mltResults.get($docId))
-  #set($mltOn = $params.getBool('mlt'))
-  #if($mltOn == true)<div class="field-name">Similar Items</div>#end
-  #if ($mltOn && $mlt && $mlt.size() > 0)
-  <ul>
-    #foreach($mltHit in $mlt)
-      #set($mltId = $mltHit.getFieldValue('id'))
-      <li><div><a href="#url_for_home?q=id:$mltId">$mltId</a></div><div><span class="field-name">Name:</span> $mltHit.getFieldValue('name')</div>
-        <div><span class="field-name">Price:</span> $!number.currency($mltHit.getFieldValue('price')) <span class="field-name">In Stock:</span> $mltHit.getFieldValue('inStock')</div>
-
-      </li>
-    #end
-  </ul>
-  #elseif($mltOn && $mlt.size() == 0)
-    <div>No Similar Items Found</div>
-  #end
-</div>
-#parse('debug.vm')
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query.vm
deleted file mode 100644
index ddbab3f..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query.vm
+++ /dev/null
@@ -1,42 +0,0 @@
-<div class="query-box">
-  <form id="query-form" action="#{url_for_home}" method="GET">
-    <div class="inputs">
-      <span #annTitle("Add the query using the &q= parameter")>Find: <input type="text" id="q" name="q" value="$!esc.html($params.get('q'))"/> <input type="submit" id="querySubmit"/> <input type="reset"/></span>
-      <div class="query-boost"><span #annTitle("Add the boost function &bf=price to the query")><input type="checkbox" name="bf" value="price" #if($request.params.get('bf') == 'price')checked="true"#end>Boost by Price</input></span>
-      #parse("querySpatial.vm")
-      #parse("queryGroup.vm")
-      </div>
-  </div>
-
-    #if($request.params.get('debugQuery'))
-      <input type="hidden" name="debugQuery" value="true"/>
-    #end
-    #if($annotate == true)
-      <input type="hidden" name="annotateBrowse" value="true"/>
-    #end
-    #foreach($fq in $request.params.getParams('fq'))
-      #if ($fq != "{!bbox}")
-        <input type="hidden" name="fq" id="allFQs" value="$esc.html($fq)"/>
-      #end
-    #end
-    <div class="constraints" #annTitle("Lists out the &fq filters.  Click to remove.")>
-      #foreach($fq in $params.getParams('fq'))
-        #set($previous_fq_count=$velocityCount - 1)
-        #if($fq != '')
-        &gt; <a style="{text-decoration: line-through;}" href="#url_for_filters($request.params.getParams('fq').subList(0,$previous_fq_count))">$fq</a>
-        #end
-      #end
-    </div>
-    <div class="parsed_query_header">
-     #if($request.params.get('debugQuery'))
-        <a href="#" onclick='jQuery(this).siblings("div").toggle(); return false;'>toggle parsed query</a>
-        <div class="parsed_query" style="display:none">$response.response.debug.parsedquery</div>
-      #end
-      #set($queryOpts = $request.params.get("queryOpts"))
-      #if($queryOpts && $queryOpts != "")
-        <input type="hidden" name="queryOpts" value="$queryOpts"/>
-      #end
-    </div>
-  </form>
-
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_form.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_form.vm
deleted file mode 100644
index 70a0af2..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_form.vm
+++ /dev/null
@@ -1,64 +0,0 @@
-#**
- *  Renders the main query form
- *#
-
-<div class="query-box">
-  <form id="query-form" action="#{url_for_home}" method="GET">
-
-    <div class="inputs">
-      <span #annTitle("Add the query using the &q= parameter")>
-        Find:
-        <input type="text" id="q" name="q" value="$!esc.html($params.get('q'))"/>
-        <input type="submit" id="querySubmit"/>
-        <input type="reset"/>
-      </span>
-      <div class="query-boost">
-        <span #annTitle("Add the boost function &bf=price to the query")>
-          <input type="checkbox" name="bf" value="price"
-            #if($request.params.get('bf') == 'price')checked="true"#end
-          >
-            Boost by Price
-          </input>
-        </span>
-      #parse("query_spatial.vm")
-      #parse("query_group.vm")
-      </div>
-  </div>
-
-    #if($request.params.get('debugQuery'))
-      <input type="hidden" name="debugQuery" value="true"/>
-    #end
-    #if($annotate == true)
-      <input type="hidden" name="annotateBrowse" value="true"/>
-    #end
-    #foreach($fq in $request.params.getParams('fq'))
-      #if ($fq != "{!bbox}")
-        <input type="hidden" name="fq" id="allFQs" value="$esc.html($fq)"/>
-      #end
-    #end
-
-    <div class="constraints" #annTitle("Lists out the &fq filters.  Click to remove.")>
-      #foreach($fq in $params.getParams('fq'))
-        #set($previous_fq_count=$velocityCount - 1)
-        #if($fq != '')
-          &gt;
-          <a style="{text-decoration: line-through;}"
-            href="#url_for_filters($request.params.getParams('fq').subList(0,$previous_fq_count))"
-          >$fq</a>
-        #end
-      #end
-    </div>
-
-    <div class="parsed_query_header">
-      #if($request.params.get('debugQuery'))
-        <a href="#" onclick='jQuery(this).siblings("div").toggle(); return false;'>toggle parsed query</a>
-        <div class="parsed_query" style="display:none">$response.response.debug.parsedquery</div>
-      #end
-      #set($queryOpts = $request.params.get("queryOpts"))
-      #if($queryOpts && $queryOpts != "")
-        <input type="hidden" name="queryOpts" value="$queryOpts"/>
-      #end
-    </div>
-
-  </form>
-</div>
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_group.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_group.vm
deleted file mode 100644
index 42e5457..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_group.vm
+++ /dev/null
@@ -1,43 +0,0 @@
-#**
- *  Query settings for grouping by fields,
- *  e.g.: Manufacturer or Popularity
- *#
-
-#set($queryOpts = $params.get("queryOpts"))
-
-#if($queryOpts == "group")
-  <div>
-    #set($groupF = $request.params.get('group.field'))
-
-    <label #annTitle("Add the &group.field parameter. Multiselect is supported")>
-      Group By:
-      <select id="group" name="group.field" multiple="true">
-        ## TODO: Handle multiple selects correctly
-        ## TODO: fix empty / "No Group" selection
-
-        <option value=""
-          #if($groupF == '')selected="true"#end
-        >
-          No Group
-        </option>
-
-        <option value="manu_exact"
-          #if($groupF == 'manu_exact')selected="true"#end
-        >
-          Manufacturer
-        </option>
-
-        <option value="popularity"
-          #if($groupF == 'popularity')selected="true"#end
-        >
-          Popularity
-        </option>
-
-      </select>
-    </label>  
-
-    <input type="hidden" name="group" value="true"/>
-
-  </div>
-
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_spatial.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_spatial.vm
deleted file mode 100644
index 2bc2044..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/query_spatial.vm
+++ /dev/null
@@ -1,75 +0,0 @@
-#**
- *  Query logic for selecting location / Geospatial search
- *#
-
-#set($queryOpts = $params.get("queryOpts"))
-
-#if($queryOpts == "spatial")
-
-  <div>
-
-    #set($loc = $request.params.get('pt'))
-    ## Normalize first trip through to "none" because
-    ## an empty string generates an error message later on
-    #if( ! $loc )
-      #set( $loc = "none" )
-    #end
-
-    #set($dist = $request.params.get('d', "10"))
-
-    ## Cities for The Select List
-    #set( $cities = {
-      "none": "No Filter",
-      "45.17614,-93.87341": "Buffalo, MN",
-      "37.7752,-100.0232": "Dodge City, KS",
-      "35.0752,-97.032": "Oklahoma City, OK",
-      "37.7752,-122.4232": "San Francisco CA"
-    })
-
-    <label #annTitle("Add the &pt parameter")>
-      Location Filter:
-      <select id="pt" name="pt">
-
-        ## Generate <option> tag for each city
-        #foreach( $city_lon_lat in $cities.keySet() )
-          #set( $city_name = $cities.get($city_lon_lat) )
-          <option value="$city_lon_lat"
-            #if($loc == $city_lon_lat)selected="true"#end
-          >
-            $city_name
-          </option>
-        #end
-
-      </select>
-
-    </label>
-
-    <span #annTitle("Add the &d parameter")>
-      Distance (KM):
-      <input id="d" name="d" type="text" size="6"
-        value="#if($dist != '')${dist}#{else}10#end"  ## TODO: isn't the default of 10 above sufficient?  no if/else needed?
-      />
-    </span>
-
-    <input type="hidden" name="sfield" value="store"/>
-    <input type="hidden" id="spatialFQ" name="fq" value=""/>
-    <input type="hidden" name="queryOpts" value="spatial"/>        
-
-  </div>
-
-  <script type="text/javascript">
-    $('#query-form').submit(function() {
-      if ($("#pt").val() != "none") {
-        $("#spatialFQ").val("{!bbox}");
-      }
-      $fqs = $("#allFQs").val();
-      $fqs = $fqs.replace("{!bbox}", "");
-      if ($fqs == ''){
-        $("#allFQs").remove();
-      }
-      $("#allFQs").val($fqs);
-      return true;
-    });
-  </script>
-
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/results_list.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/results_list.vm
deleted file mode 100644
index f73532b..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/results_list.vm
+++ /dev/null
@@ -1,22 +0,0 @@
-#**
- *  Render the main Results List
- *#
-
-## Usually displayed inside <div class="results">
-
-#if($response.response.get('grouped'))
-
-  #foreach($grouping in $response.response.get('grouped'))
-    #parse("hit_grouped.vm")
-  #end
-
-#else
-
-  #foreach($doc in $response.results)
-    #parse("hit.vm")
-    ## Can get an extremely simple view of the doc
-    ## which might be nicer for debugging
-    ##parse("hit_plain.vm")
-  #end
-
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/richtext_doc.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/richtext_doc.vm
deleted file mode 100644
index 74f1c07..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/richtext_doc.vm
+++ /dev/null
@@ -1,153 +0,0 @@
-#**
- *  Render a complex document in the results list
- *#
-
-## Load Mime-Type List and Mapping
-#parse('mime_type_lists.vm')
-## Sets:
-## * supportedMimeTypes, AKA supportedtypes
-## * mimeExtensionsMap, AKA extMap
-
-## Title
-#if($doc.getFieldValue('title'))
-  #set($title = $esc.html($doc.getFirstValue('title')))
-#else
-  #set($title = "["+$doc.getFieldValue('id')+"]")
-#end
-
-## URL
-#if($doc.getFieldValue('url'))
-  #set($url = $doc.getFieldValue('url'))
-#elseif($doc.getFieldValue('resourcename'))
-  #set($url = "file:///$doc.getFieldValue('resourcename')")
-#else
-  #set($url = "$doc.getFieldValue('id')")
-#end
-
-## Sort out Mime-Type
-#set($ct = $doc.getFirstValue('content_type').split(";").get(0))
-#set($filename = $doc.getFieldValue('resourcename'))
-#set($filetype = false)
-#set($filetype = $mimeExtensionsMap.get($ct))
-
-## TODO: falling back to file extension is convenient,
-## except when you don't have an icon for that extension
-## example "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
-## document with a .docx extension.
-## It'd be nice to fall back to an "unknown" or the existing "file" type
-## We sort of do this below, but only if the filename has no extension
-## (anything after the last dot).
-
-#if(!$filetype)
-  #set($filetype = $filename.substring($filename.lastIndexOf(".")).substring(1))
-#end
-
-## #if(!$filetype)
-##   #set($filetype = "file")
-## #end
-## #if(!$supportedMimeTypes.contains($filetype))
-##   #set($filetype = "file")
-## #end
-
-## Row 1: Icon and Title and mlt link
-<div class="result-title">
-  ## Icon
-  ## Small file type icons from http://www.splitbrain.org/projects/file_icons (public domain)
-  <img src="#{url_root}/img/filetypes/${filetype}.png" align="center">
-
-  ## Title, hyperlinked
-  <a href="${url}" target="_blank">
-    <b>$title</b></a>
-
-  ## Link for MLT / More Like This / Find Similar
-  <span class="mlt">
-    #if($params.getBool('mlt', false) == false)
-      <a href="#lensNoQ&q=id:%22$docId%22&mlt=true">
-        More Like This</a>
-    #end
-  </span>
-
-</div>
-
-## Row 2?: ID / URL
-<div>
-  Id: #field('id')
-</div>
-
-## Resource Name
-<div>
-  #if($doc.getFieldValue('resourcename'))
-    Resource name: $filename 
-  #elseif($url)
-    URL: $url
-  #end
-  #if($ct)
-    ($ct)
-  #end
-</div>
-
-## Author
-#if($doc.getFieldValue('author'))
-  <div>
-    Author: #field('author')
-  </div>
-#end
-
-## Last_Modified Date
-#if($doc.getFieldValue('last_modified'))
-  <div>
-    last-modified:
-    #field('last_modified')
-  </div>
-#end
-
-## Main content of doc
-<div class="result-body">
-  #field('content')
-</div>
-
-## Display Similar Documents / MLT = More Like This
-<div class="mlt">
-  #set($mlt = $mltResults.get($docId))
-  #set($mltOn = $params.getBool('mlt'))
-  #if($mltOn == true)
-    <div class="field-name">
-      Similar Items
-    </div>
-  #end
-  ## If has MLT enabled An Entries to show
-  #if ($mltOn && $mlt && $mlt.size() > 0)
-    <ul>
-      #foreach($mltHit in $mlt)
-        #set($mltId = $mltHit.getFieldValue('id'))
-        <li>
-          <div>
-            <a href="#url_for_home?q=id:$mltId">
-              $mltId</a>
-          </div>
-          <div>
-            <span class="field-name">
-              Title:
-            </span>
-            $mltHit.getFieldValue('title')
-          </div>
-          <div>
-            <span class="field-name">
-              Author:
-            </span>
-            $mltHit.getFieldValue('author')
-            <span class="field-name">
-              Description:
-            </span>
-            $mltHit.getFieldValue('description')
-          </div>
-        </li>
-      #end    ## end for each mltHit in $mlt
-    </ul>
-  ## Else MLT Enabled but no mlt results for this query
-  #elseif($mltOn && $mlt.size() == 0)
-    <div>No Similar Items Found</div>
-  #end
-</div>  ## div class=mlt
-
-#parse('debug.vm')
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/suggest.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/suggest.vm
deleted file mode 100644
index dae6b83..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/suggest.vm
+++ /dev/null
@@ -1,8 +0,0 @@
-#**
- *  Provides cynamic spelling suggestions
- *  as you type in the search form
- *#
-
-#foreach($t in $response.response.terms.name)
-  $t.key
-#end
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/tabs.vm b/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/tabs.vm
deleted file mode 100644
index da19cbc..0000000
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/velocity/tabs.vm
+++ /dev/null
@@ -1,50 +0,0 @@
-#**
- *  Provides navigation/access to Advanced search options
- *  Usually displayed near the top of the page
- *#
-
-##TODO: Make some nice tabs here
-
-#set($queryOpts = $params.get("queryOpts"))
-
-<div class="tabs-bar" #annTitle("Click the link to demonstrate various Solr capabilities")>
-
-  <span>Type of Search:</span>
-
-  ##queryOpts=$queryOpts
-
-  ## return to Simple Search
-  ##set( $selected = ($queryOpts && $queryOpts != "") )
-  #set( $selected = ! $queryOpts )
-  <span class="tab #if($selected)selected#end">
-    #if($selected)
-      Simple
-    #else
-      <a href="#url_for_home/?#debug#annotate">
-        Simple</a>
-    #end
-  </span>
-
-  ## GEO-Spatial / Location Based
-  #set( $selected = ($queryOpts == "spatial") )
-  <span class="tab #if($selected)selected#end">
-    #if($selected)
-      Spatial
-    #else
-      <a href="#url_for_home?&queryOpts=spatial#debug#annotate">
-        Spatial</a>
-    #end
-  </span>
-
-  ## Group By Field
-  #set( $selected = ($queryOpts == "group") )
-  <span class="tab #if($selected)selected#end">
-    #if($selected)
-      Group By
-    #else
-      <a href="#url_for_home?#debug#annotate&queryOpts=group&group=true&group.field=manu_exact">
-        Group By</a>
-    #end
-  </span>
-
-</div>
diff --git a/solr/solr-ref-guide/build.gradle b/solr/solr-ref-guide/build.gradle
index 8571646..e3063c6 100644
--- a/solr/solr-ref-guide/build.gradle
+++ b/solr/solr-ref-guide/build.gradle
@@ -86,7 +86,6 @@
     depVer('org.apache.logging.log4j:log4j-core')
     depVer('org.apache.opennlp:opennlp-tools')
     depVer('org.apache.tika:tika-core')
-    depVer('org.apache.velocity.tools:velocity-tools-generic')
     depVer('org.apache.zookeeper:zookeeper')
 
     // jekyll dependencies
@@ -195,7 +194,6 @@
                 ["ivyversions.org.apache.logging.log4j.version", "org.apache.logging.log4j", "log4j-core"],
                 ["ivyversions./org.apache.opennlp/opennlp-tools", "org.apache.opennlp", "opennlp-tools"],
                 ["ivyversions.org.apache.tika.version", "org.apache.tika", "tika-core"],
-                ["ivyversions.org.apache.velocity.tools.version", "org.apache.velocity.tools", "velocity-tools-generic"],
                 ["ivyversions.org.apache.zookeeper.version", "org.apache.zookeeper", "zookeeper"],
 
                 ["ivy-zookeeper-version", "org.apache.zookeeper", "zookeeper"],
@@ -203,7 +201,6 @@
                 ["ivy-tika-version", "org.apache.tika", "tika-core"],
                 ["ivy-opennlp-version", "org.apache.opennlp", "opennlp-tools"],
                 ["ivy-commons-codec-version", "commons-codec", "commons-codec"],
-                ["ivy-velocity-tools-version", "org.apache.velocity.tools", "velocity-tools-generic"],
                 ["ivy-dropwizard-version", "io.dropwizard.metrics", "metrics-core"]
         ].each { antProp, depGroup, depId ->
             props[antProp] = getVersion(depGroup, depId, configurations.depVer)
diff --git a/solr/solr-ref-guide/src/_config.yml.template b/solr/solr-ref-guide/src/_config.yml.template
index 36c92ed..fb840c0 100755
--- a/solr/solr-ref-guide/src/_config.yml.template
+++ b/solr/solr-ref-guide/src/_config.yml.template
@@ -83,7 +83,6 @@
   ivy-log4j-version: "${ivyversions.org.apache.logging.log4j.version}"
   ivy-opennlp-version: "${ivyversions./org.apache.opennlp/opennlp-tools}"
   ivy-tika-version: "${ivyversions.org.apache.tika.version}"
-  ivy-velocity-tools-version: "${ivyversions.org.apache.velocity.tools.version}"
   ivy-zookeeper-version: "${ivyversions.org.apache.zookeeper.version}"
 
 asciidoctor:
diff --git a/solr/solr-ref-guide/src/libs.adoc b/solr/solr-ref-guide/src/libs.adoc
index 5243274..9c25ac0 100644
--- a/solr/solr-ref-guide/src/libs.adoc
+++ b/solr/solr-ref-guide/src/libs.adoc
@@ -72,7 +72,5 @@
   <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
 
-  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
 ----
diff --git a/solr/solr-ref-guide/src/response-writers.adoc b/solr/solr-ref-guide/src/response-writers.adoc
index a921658..e550a99 100644
--- a/solr/solr-ref-guide/src/response-writers.adoc
+++ b/solr/solr-ref-guide/src/response-writers.adoc
@@ -1,5 +1,5 @@
 = Response Writers
-:page-children: velocity-response-writer
+
 // Licensed to the Apache Software Foundation (ASF) under one
 // or more contributor license agreements.  See the NOTICE file
 // distributed with this work for additional information
@@ -32,7 +32,6 @@
 * <<Python Response Writer,python>>
 * <<Ruby Response Writer,ruby>>
 * <<Smile Response Writer,smile>>
-* <<Velocity Response Writer,velocity>>
 * <<XLSX Response Writer,xlsx>>
 * <<Standard XML Response Writer,xml>>
 * <<XSLT Response Writer,xslt>>
@@ -298,13 +297,6 @@
 MA147LL/A,"electronics,music",Apple 60 GB iPod with Video Playback Black,10,399.0,0.2446348
 ----
 
-[[velocity-writer]]
-== Velocity Response Writer
-
-The `VelocityResponseWriter` processes the Solr response and request context through Apache Velocity templating.
-
-See the <<velocity-response-writer.adoc#velocity-response-writer,Velocity Response Writer>> section for details.
-
 == Smile Response Writer
 
 The Smile format is a JSON-compatible binary format, described in detail here: https://en.wikipedia.org/wiki/Smile_%28data_interchange_format%29[https://en.wikipedia.org/wiki/Smile_(data_interchange_format)]
diff --git a/solr/solr-ref-guide/src/searching.adoc b/solr/solr-ref-guide/src/searching.adoc
index cc84627..4298548 100644
--- a/solr/solr-ref-guide/src/searching.adoc
+++ b/solr/solr-ref-guide/src/searching.adoc
@@ -1,6 +1,5 @@
 = Searching
 :page-children: overview-of-searching-in-solr, +
-  velocity-search-ui, +
   relevance, +
   query-syntax-and-parsing, +
   json-request-api, +
@@ -50,7 +49,6 @@
 This section describes how Solr works with search requests. It covers the following topics:
 
 * <<overview-of-searching-in-solr.adoc#overview-of-searching-in-solr,Overview of Searching in Solr>>: An introduction to searching with Solr.
-* <<velocity-search-ui.adoc#velocity-search-ui,Velocity Search UI>>: A simple search UI using the VelocityResponseWriter.
 * <<relevance.adoc#relevance,Relevance>>: Conceptual information about understanding relevance in search results.
 * <<query-syntax-and-parsing.adoc#query-syntax-and-parsing,Query Syntax and Parsing>>: A brief conceptual overview of query syntax and parsing. It also contains the following sub-sections:
 ** <<common-query-parameters.adoc#common-query-parameters,Common Query Parameters>>: No matter the query parser, there are several parameters that are common to all of them.
diff --git a/solr/solr-ref-guide/src/velocity-response-writer.adoc b/solr/solr-ref-guide/src/velocity-response-writer.adoc
deleted file mode 100644
index e08ea70..0000000
--- a/solr/solr-ref-guide/src/velocity-response-writer.adoc
+++ /dev/null
@@ -1,120 +0,0 @@
-= Velocity Response Writer
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-The VelocityResponseWriter is an optional plugin available in the `contrib/velocity` directory. It powers the /browse user interfaces when using some example configurations such as "techproducts" and "example/files".
-
-[IMPORTANT]
-====
-The VelocityResponseWriter has been deprecated and may be removed in a future version of Solr.
-====
-
-Its JAR and dependencies must be added (via `<lib>` or solr/home lib inclusion), and must be registered in `solrconfig.xml` like this:
-
-[source,xml]
-----
-<queryResponseWriter name="velocity" class="solr.VelocityResponseWriter">
-  <str name="template.base.dir">${velocity.template.base.dir:}</str>
-
-<!--
-  <str name="init.properties.file">velocity-init.properties</str>
-  <lst name="tools">
-    <str name="mytool">com.example.MyCustomTool</str>
-  </lst>
--->
-</queryResponseWriter>
-----
-
-== Configuration & Usage
-
-=== Template Rendering Protections
-
-Velocity template rendering is largely controlled by the `trusted` configset flag.  Templates built into (the `/browse` ones) the component library are always available
-with this component.  In a trusted configset, templates in the `velocity/` subdirectory of the configset are renderable.  Also in a trusted configset, when `template.base.dir`
-is specified those templates are renderable.
-
-=== VelocityResponseWriter Initialization Parameters
-
-`template.base.dir`::
-If specified and exists as a file system directory, a file resource loader will be added for this directory. Templates in this directory will override "solr" resource loader templates.
-
-`init.properties.file`:: Specifies a properties file name which must exist in the Solr `conf/` directory (*not* under a `velocity/` subdirectory) or root of a JAR file in a <lib>.
-
-`tools`::
-External "tools" can be specified as list of string name/value (tool name / class name) pairs. Tools, in the Velocity context, are simply Java objects. Tool classes are constructed using a no-arg constructor (or a single-SolrCore-arg constructor if it exists) and added to the Velocity context with the specified name.
-+
-A custom registered tool can override the built-in context objects with the same name, except for `$request`, `$response`, `$page`, and `$debug` (these tools are designed to not be overridden).
-
-=== VelocityResponseWriter Request Parameters
-
-`v.template`::
-Specifies the name of the template to render.
-
-`v.layout`::
-Specifies a template name to use as the layout around the main, `v.template`, specified template.
-+
-The main template is rendered into a string value included into the layout rendering as `$content`.
-
-`v.layout.enabled`::
-Determines if the main template should have a layout wrapped around it. The default is `true`, but requires `v.layout` to specified as well.
-
-`v.contentType`::
-Specifies the content type used in the HTTP response. If not specified, the default will depend on whether `v.json` is specified or not.
-+
-The default without `v.json=wrf`: `text/html;charset=UTF-8`.
-+
-The default with `v.json=wrf`: `application/json;charset=UTF-8`.
-
-`v.json`::
-Specifies a function name to wrap around the response rendered as JSON. If specified, the content type used in the response will be "application/json;charset=UTF-8", unless overridden by `v.contentType`.
-+
-Output will be in this format (with `v.json=wrf`):
-+
-`wrf("result":"<Velocity generated response string, with quotes and backslashes escaped>")`
-
-`v.locale`::
-Locale to use with the `$resource` tool and other LocaleConfig implementing tools. The default locale is `Locale.ROOT`. Localized resources are loaded from standard Java resource bundles named `resources[_locale-code].properties`.
-+
-Resource bundles can be added by providing a JAR file visible by the SolrResourceLoader with resource bundles under a velocity sub-directory. Resource bundles are not loadable under `conf/`, as only the class loader aspect of SolrResourceLoader can be used here.
-
-
-=== VelocityResponseWriter Context Objects
-
-[%autowidth.spread,width="100%",options="header"]
-|===
-|Context Reference |Description
-|`request` |{solr-javadocs}solr-core/org/apache/solr/request/SolrQueryRequest.html[SolrQueryRequest] javadocs
-|`response` |{solr-javadocs}solr-core/org/apache/solr/response/SolrQueryResponse.html[QueryResponse] most of the time, but in some cases where QueryResponse doesn't like the request handler's output (https://cwiki.apache.org/confluence/display/solr/AnalysisRequestHandler[AnalysisRequestHandler], for example, causes a ClassCastException parsing "response"), the response will be a SolrResponseBase object.
-|`esc` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#EscapeTool[EscapeTool] instance
-|`date` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#ComparisonDateTool[ComparisonDateTool] instance
-|`math` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#MathTool[MathTool] instance
-|`number` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#NumberTool[NumberTool] instance
-|`sort` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#SortTool[SortTool] instance
-|`display` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#DisplayTool[DisplayTool] instance
-|`resource` |A Velocity http://velocity.apache.org/tools/{ivy-velocity-tools-version}/tools-summary.html#ResourceTool[ResourceTool] instance
-|`engine` |The current VelocityEngine instance
-|`page` |An instance of Solr's PageTool (only included if the response is a QueryResponse where paging makes sense)
-|`debug` |A shortcut to the debug part of the response, or null if debug is not on. This is handy for having debug-only sections in a template using `#if($debug)...#end`
-|`content` |The rendered output of the main template, when rendering the layout (`v.layout.enabled=true` and `v.layout=<template>`).
-|[custom tool(s)] |Tools provided by the optional "tools" list of the VelocityResponseWriter registration are available by their specified name.
-|===
-
-=== VelocityResponseWriter Usage
-
-To see results in an HTML user interface on your own collection, try http://localhost:8983/solr/<my collection>/select?q=*:*&wt=velocity&v.template=browse&v.layout=layout
-
-Or try `/browse` in the examples techproducts or example/files.
diff --git a/solr/solr-ref-guide/src/velocity-search-ui.adoc b/solr/solr-ref-guide/src/velocity-search-ui.adoc
deleted file mode 100644
index 96d7f93..0000000
--- a/solr/solr-ref-guide/src/velocity-search-ui.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-= Velocity Search UI
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-Solr includes a sample search UI based on the <<response-writers.adoc#velocity-writer,VelocityResponseWriter>> (also known as Solritas) that demonstrates several useful features, such as searching, faceting, highlighting, autocomplete, and geospatial searching.
-
-When using the `sample_techproducts_configs` configset, you can access the Velocity sample Search UI: `\http://localhost:8983/solr/techproducts/browse`
-
-.The Velocity Search UI
-image::images/velocity-search-ui/techproducts_browse.png[image,width=500]
-
-For more information about the Velocity Response Writer, see the <<response-writers.adoc#velocity-writer,Response Writer page>>.
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
index 721dbd1..9d35e40 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/CollectionApiMapping.java
@@ -41,12 +41,7 @@
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.ConfigSetEndPoint.CONFIG_COMMANDS;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.ConfigSetEndPoint.CONFIG_DEL;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.ConfigSetEndPoint.LIST_CONFIG;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_ALIASES;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_CMD;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_CMD_STATUS;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_CMD_STATUS_DELETE;
-import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.CLUSTER_NODES;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTIONS;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTIONS_COMMANDS;
 import static org.apache.solr.client.solrj.request.CollectionApiMapping.EndPoint.COLLECTION_STATE;
@@ -66,10 +61,6 @@
 
   public enum Meta implements CommandMeta {
     GET_COLLECTIONS(COLLECTIONS, GET, LIST),
-    GET_CLUSTER(CLUSTER, GET, LIST, "/cluster", null),
-    GET_CLUSTER_OVERSEER(CLUSTER, GET, OVERSEERSTATUS, "/cluster/overseer", null),
-    GET_CLUSTER_STATUS_CMD(CLUSTER_CMD_STATUS, GET, REQUESTSTATUS),
-    DELETE_CLUSTER_STATUS(CLUSTER_CMD_STATUS_DELETE, DELETE, DELETESTATUS),
     GET_A_COLLECTION(COLLECTION_STATE, GET, CLUSTERSTATUS),
     LIST_ALIASES(CLUSTER_ALIASES, GET, LISTALIASES),
     CREATE_COLLECTION(COLLECTIONS_COMMANDS,
@@ -190,23 +181,23 @@
             NAME, "collection",
             "propertyName", "name",
             "propertyValue", "value")),
-    ADD_ROLE(CLUSTER_CMD,
+/*    ADD_ROLE(CLUSTER_CMD,
         POST,
         ADDROLE,
-        "add-role",null),
-    REMOVE_ROLE(CLUSTER_CMD,
+        "add-role",null),*/
+/*    REMOVE_ROLE(CLUSTER_CMD,
         POST,
         REMOVEROLE,
-        "remove-role",null),
+        "remove-role",null),*/
 
-    SET_CLUSTER_PROPERTY(CLUSTER_CMD,
+  /*  SET_CLUSTER_PROPERTY(CLUSTER_CMD,
         POST,
         CLUSTERPROP,
-        "set-property",null),
-    SET_CLUSTER_PROPERTY_OBJ(CLUSTER_CMD,
+        "set-property",null),*/
+   /* SET_CLUSTER_PROPERTY_OBJ(CLUSTER_CMD,
         POST,
         null,
-        "set-obj-property", null),
+        "set-obj-property", null),*/
     BACKUP_COLLECTION(COLLECTIONS_COMMANDS,
         POST,
         BACKUP,
@@ -218,7 +209,6 @@
         "restore-collection",
         null
     ),
-    GET_NODES(CLUSTER_NODES, GET, null),
     FORCE_LEADER(PER_COLLECTION_PER_SHARD_COMMANDS, POST, CollectionAction.FORCELEADER, "force-leader", null),
     BALANCE_SHARD_UNIQUE(PER_COLLECTION, POST, BALANCESHARDUNIQUE,"balance-shard-unique" , null)
     ;
@@ -342,12 +332,7 @@
   }
 
   public enum EndPoint implements V2EndPoint {
-    CLUSTER("cluster"),
     CLUSTER_ALIASES("cluster.aliases"),
-    CLUSTER_CMD("cluster.Commands"),
-    CLUSTER_NODES("cluster.nodes"),
-    CLUSTER_CMD_STATUS("cluster.commandstatus"),
-    CLUSTER_CMD_STATUS_DELETE("cluster.commandstatus.delete"),
     COLLECTIONS_COMMANDS("collections.Commands"),
     COLLECTIONS("collections"),
     COLLECTION_STATE("collections.collection"),
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
new file mode 100644
index 0000000..fe768be
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/beans/ClusterPropInfo.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.client.solrj.request.beans;
+
+import org.apache.solr.common.annotation.JsonProperty;
+import org.apache.solr.common.util.ReflectMapWriter;
+
+public class ClusterPropInfo implements ReflectMapWriter {
+
+  @JsonProperty
+  public String urlScheme;
+
+  @JsonProperty
+  public Integer maxCoresPerNode;
+  @JsonProperty
+  public String location;
+
+  @JsonProperty
+  public DefaultsInfo defaults;
+
+  @JsonProperty
+  public CollectionDefaults collectionDefaults;
+
+  public static class CollectionDefaults implements ReflectMapWriter {
+    @JsonProperty
+    public Integer numShards;
+    @JsonProperty
+    public Integer tlogReplicas;
+    @JsonProperty
+    public Integer pullReplicas;
+    @JsonProperty
+    public Integer nrtReplicas;
+
+  }
+
+  public static class DefaultsInfo implements ReflectMapWriter {
+
+    @JsonProperty
+    public CollectionDefaults collection;
+
+    @JsonProperty
+    public CollectionDefaults cluster;
+
+  }
+
+  public static class ClusterInfo implements ReflectMapWriter {
+    @JsonProperty
+    public Boolean useLegacyReplicaAssignment;
+
+
+    @JsonProperty
+    public CollectionDefaults collection;
+
+  }
+
+
+}
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java b/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java
index c8bc3b0..bec6266 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java
@@ -27,7 +27,7 @@
   protected final SolrParams params;
   protected final SolrParams defaults;
 
-  protected DefaultSolrParams(SolrParams params, SolrParams defaults) {
+  public DefaultSolrParams(SolrParams params, SolrParams defaults) {
     assert params != null && defaults != null;
     this.params = params;
     this.defaults = defaults;
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/JsonSchemaCreator.java b/solr/solrj/src/java/org/apache/solr/common/util/JsonSchemaCreator.java
index defd33d..e6cef30 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/JsonSchemaCreator.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/JsonSchemaCreator.java
@@ -58,10 +58,16 @@
   private static Map<String, Object> createSchemaFromType(java.lang.reflect.Type t, Map<String, Object> map) {
     if (natives.containsKey(t)) {
       map.put("type", natives.get(t));
-    } else if (t instanceof ParameterizedType && ((ParameterizedType) t).getRawType() == List.class) {
-      Type typ = ((ParameterizedType) t).getActualTypeArguments()[0];
-      map.put("type", "array");
-      map.put("items", getSchema(typ));
+    } else if (t instanceof ParameterizedType) {
+      if (((ParameterizedType) t).getRawType() == List.class) {
+        Type typ = ((ParameterizedType) t).getActualTypeArguments()[0];
+        map.put("type", "array");
+        map.put("items", getSchema(typ));
+      } else if (((ParameterizedType) t).getRawType() == Map.class) {
+        Type typ = ((ParameterizedType) t).getActualTypeArguments()[0];
+        map.put("type", "object");
+        map.put("additionalProperties", true);
+      }
     } else {
       createObjectSchema((Class) t, map);
     }
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
index 669df38..b4f1f47 100644
--- a/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/Utils.java
@@ -871,7 +871,7 @@
               l.add((ew, inst) -> ew.put(fname, (float) mh.invoke(inst)));
             } else {
               MethodHandle mh = lookup.findGetter(c, field.getName(), field.getType());
-              l.add((ew, inst) -> ew.put(fname, mh.invoke(inst)));
+              l.add((ew, inst) -> ew.putIfNotNull(fname, mh.invoke(inst)));
             }
           } catch (NoSuchFieldException e) {
             //this is unlikely
diff --git a/solr/solrj/src/resources/apispec/cluster.Commands.json b/solr/solrj/src/resources/apispec/cluster.Commands.json
deleted file mode 100644
index 04428b3..0000000
--- a/solr/solrj/src/resources/apispec/cluster.Commands.json
+++ /dev/null
@@ -1,146 +0,0 @@
-{
-  "documentation": "https://lucene.apache.org/solr/guide/cluster-node-management.html",
-  "description": "Cluster-wide commands to assign roles to nodes, remove role assignments, or add, edit or remove a cluster-wide property.",
-  "methods": [
-    "POST"
-  ],
-  "url": {
-    "paths": [
-      "/cluster"
-    ]
-  },
-  "commands": {
-    "add-role":{
-      "type":"object",
-      "documentation":"https://lucene.apache.org/solr/guide/cluster-node-management.html#addrole",
-      "description":"Assign a specific role to a node in the cluster.",
-      "properties": {
-        "role": {
-          "type": "string",
-          "description": "The name of the role. The only supported role is 'overseer'."
-
-        },
-        "node": {
-          "type": "string",
-          "description": "The name of the node. It is possible to assign a role even before that node is started."
-
-        }
-      },
-      "required": [
-        "role",
-        "node"
-      ]
-    },
-    "remove-role":{
-      "type":"object",
-      "documentation":"https://lucene.apache.org/solr/guide/cluster-node-management.html#removerole",
-      "description":"Unassign a role from a node in the cluster.",
-      "properties": {
-        "role": {
-          "type": "string",
-          "description": "The name of the role. The only supported role as of now is 'overseer'."
-
-        },
-        "node": {
-          "type": "string",
-          "description": "The name of the node where the role should be removed."
-        }
-      },
-      "required": [
-        "role",
-        "node"
-      ]
-    },
-    "set-property": {
-      "type": "object",
-      "documentation": "https://lucene.apache.org/solr/guide/cluster-node-management.html#clusterprop",
-      "description": "Add, edit, or delete a cluster-wide property.",
-      "properties": {
-        "name": {
-          "type": "string",
-          "description": "The name of the property"
-        },
-        "val": {
-          "type": ["string","boolean","null"],
-          "description": "The value of the property. If the value is empty or null, the property is unset."
-        }
-      },
-      "required": [
-        "name",
-        "val"
-      ]
-    },
-    "set-obj-property": {
-      "type": "object",
-      "documentation": "https://lucene.apache.org/solr/guide/cluster-node-management.html#clusterprop",
-      "description": "Add, edit, or delete a cluster-wide property.",
-      "properties": {
-        "urlScheme": {
-          "type": "string"
-        },
-        "maxCoresPerNode": {
-          "type": "boolean"
-        },
-        "location": {
-          "type": "string"
-        },
-        "defaults" : {
-          "type" : "object",
-          "properties": {
-            "cluster": {
-              "type" : "object",
-              "properties": {
-                "useLegacyReplicaAssignment": {
-                  "type" : "boolean",
-                  "description" : "Decides wheyher to use the deprecated legacy replica assignment strategy or not"
-                }
-              }
-            },
-            "collection": {
-              "type": "object",
-              "properties": {
-                "numShards": {
-                  "type": "integer",
-                  "description": "Default number of shards for a collection"
-                },
-                "tlogReplicas": {
-                  "type": "integer",
-                  "description": "Default number of TLOG replicas"
-                },
-                "pullReplicas": {
-                  "type": "integer",
-                  "description": "Default number of PULL replicas"
-                },
-                "nrtReplicas": {
-                  "type": "integer",
-                  "description": "Default number of NRT replicas"
-                }
-              }
-            }
-          }
-        },
-        "collectionDefaults": {
-          "type": "object",
-          "properties": {
-            "numShards": {
-              "type": "integer",
-              "description": "Default number of shards for a collection"
-            },
-            "tlogReplicas": {
-              "type": "integer",
-              "description": "Default number of TLOG replicas"
-            },
-            "pullReplicas": {
-              "type": "integer",
-              "description": "Default number of PULL replicas"
-            },
-            "nrtReplicas": {
-              "type": "integer",
-              "description": "Default number of NRT replicas"
-            }
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/solr/solrj/src/resources/apispec/cluster.commandstatus.delete.json b/solr/solrj/src/resources/apispec/cluster.commandstatus.delete.json
deleted file mode 100644
index 5576c42..0000000
--- a/solr/solrj/src/resources/apispec/cluster.commandstatus.delete.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "methods": [
-    "DELETE"
-  ],
-  "url": {
-    "paths": [
-      "/cluster/command-status/{id}"
-    ]
-  }
-}
diff --git a/solr/solrj/src/resources/apispec/cluster.commandstatus.json b/solr/solrj/src/resources/apispec/cluster.commandstatus.json
deleted file mode 100644
index a8a402b..0000000
--- a/solr/solrj/src/resources/apispec/cluster.commandstatus.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "methods": [
-    "GET"
-  ],
-  "url": {
-    "paths": [
-      "/cluster/command-status"
-    ],
-    "params": {
-      "flush": {
-        "type": "boolean",
-        "default": false
-      },
-      "id":{
-        "type":"string",
-        "description": "The command id"
-      }
-    }
-  }
-}
diff --git a/solr/solrj/src/resources/apispec/cluster.json b/solr/solrj/src/resources/apispec/cluster.json
deleted file mode 100644
index 028869d..0000000
--- a/solr/solrj/src/resources/apispec/cluster.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "documentation": "https://lucene.apache.org/solr/guide/cluster-node-management.html",
-  "description": "General information about the cluster, including defined collections (with the 'cluster' endpoint), status of the overseer (with the 'cluster/overseer' endpoint), and available nodes (with the 'cluster/nodes' endpoint).",
-  "methods": [
-    "GET"
-  ],
-  "url": {
-    "paths": [
-      "/cluster",
-      "/cluster/overseer",
-      "/cluster/nodes"
-    ]
-  }
-}
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
index 404661a..b4807e3 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/JsonValidatorTest.java
@@ -40,7 +40,6 @@
     checkSchema("core.config.Commands");
     checkSchema("core.SchemaEdit");
     checkSchema("cluster.configs.Commands");
-    checkSchema("cluster.Commands");
   }
 
 
@@ -173,13 +172,6 @@
 
   }
 
-  public void testNullObjectValue() {
-    ValidatingJsonMap spec = Utils.getSpec("cluster.Commands").getSpec();
-    JsonSchemaValidator validator = new JsonSchemaValidator((Map) Utils.getObjectByPath(spec, false, "/commands/set-obj-property"));
-    List<String> object = validator.validateJson(Utils.fromJSONString("{collectionDefaults: null}"));
-    assertNull(object);
-  }
-
   private void checkSchema(String name) {
     ValidatingJsonMap spec = Utils.getSpec(name).getSpec();
     @SuppressWarnings({"rawtypes"})
diff --git a/versions.lock b/versions.lock
index a606bd8..e960306 100644
--- a/versions.lock
+++ b/versions.lock
@@ -78,7 +78,7 @@
 org.apache.commons:commons-configuration2:2.1.1 (1 constraints: 0605f935)
 org.apache.commons:commons-csv:1.7 (1 constraints: ac04212c)
 org.apache.commons:commons-exec:1.3 (1 constraints: a8041d2c)
-org.apache.commons:commons-lang3:3.9 (7 constraints: 36678708)
+org.apache.commons:commons-lang3:3.9 (4 constraints: 702e84c7)
 org.apache.commons:commons-math3:3.6.1 (1 constraints: 0c050d36)
 org.apache.commons:commons-text:1.6 (1 constraints: ab04202c)
 org.apache.curator:curator-client:2.13.0 (1 constraints: 3805383b)
@@ -118,10 +118,6 @@
 org.apache.tika:tika-java7:1.24 (1 constraints: db04f730)
 org.apache.tika:tika-parsers:1.24 (1 constraints: db04f730)
 org.apache.tika:tika-xmp:1.24 (1 constraints: db04f730)
-org.apache.velocity:velocity-engine-core:2.0 (3 constraints: 973bcd79)
-org.apache.velocity.tools:velocity-tools-generic:3.0 (1 constraints: 00136415)
-org.apache.velocity.tools:velocity-tools-view:3.0 (1 constraints: 7a14126a)
-org.apache.velocity.tools:velocity-tools-view-jsp:3.0 (1 constraints: a704202c)
 org.apache.xmlbeans:xmlbeans:3.1.0 (1 constraints: 0605fd35)
 org.apache.zookeeper:zookeeper:3.6.1 (1 constraints: 0c050d36)
 org.apache.zookeeper:zookeeper-jute:3.6.1 (1 constraints: 8a0d3828)
@@ -178,7 +174,7 @@
 org.restlet.jee:org.restlet.ext.servlet:2.4.3 (1 constraints: 0b050436)
 org.rrd4j:rrd4j:3.5 (1 constraints: ac04252c)
 org.slf4j:jcl-over-slf4j:1.7.24 (1 constraints: 4005473b)
-org.slf4j:slf4j-api:1.7.24 (18 constraints: 64f415d2)
+org.slf4j:slf4j-api:1.7.24 (15 constraints: a3ba2a7b)
 org.tallison:jmatio:1.5 (1 constraints: aa041f2c)
 org.tukaani:xz:1.8 (1 constraints: ad04222c)
 org.xerial.snappy:snappy-java:1.1.7.6 (1 constraints: 6f05a240)
diff --git a/versions.props b/versions.props
index 39a4f17..fa27d0e 100644
--- a/versions.props
+++ b/versions.props
@@ -24,7 +24,6 @@
 com.sun.jersey:*=1.19
 com.tdunning:t-digest=3.1
 com.vaadin.external.google:android-json=0.0.20131108.vaadin1
-commons-beanutils:commons-beanutils=1.9.3
 commons-cli:commons-cli=1.4
 commons-codec:commons-codec=1.13
 commons-collections:commons-collections=3.2.2
@@ -70,7 +69,6 @@
 org.apache.pdfbox:jempbox=1.8.16
 org.apache.poi:*=4.1.1
 org.apache.tika:*=1.24
-org.apache.velocity.tools:*=3.0
 org.apache.xmlbeans:xmlbeans=3.1.0
 org.apache.zookeeper:*=3.6.1
 org.asciidoctor:asciidoctorj=1.6.2