Bring branch up to date with trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/security@1499601 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/.gitignore b/.gitignore
index 3530f93..faf37a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
-
+# hdfs
+/solr/example/hdfs
 *.jar
 
 # .
diff --git a/build.xml b/build.xml
index eac26db..7851b7d 100644
--- a/build.xml
+++ b/build.xml
@@ -90,6 +90,7 @@
 	
   <target name="rat-sources" description="Runs rat across all sources and tests">
     <subant target="rat-sources" inheritall="false" failonerror="true">
+      <fileset dir="." includes="extra-targets.xml" /><!-- run rat-sources also for root directory -->
       <fileset dir="lucene" includes="build.xml" />
       <fileset dir="solr" includes="build.xml" />
     </subant>
@@ -130,7 +131,7 @@
   
   <target name="get-maven-poms"
           description="Copy Maven POMs from dev-tools/maven/ to maven-build/">
-    <copy todir="${maven-build-dir}" overwrite="true">
+    <copy todir="${maven-build-dir}" overwrite="true" encoding="UTF-8">
       <fileset dir="${basedir}/dev-tools/maven"/>
       <filterset begintoken="@" endtoken="@">
         <filter token="version" value="${version}"/>
@@ -185,7 +186,12 @@
   </target>
 
   <target name="eclipse" depends="resolve" description="Setup Eclipse configuration">
-    <copy file="dev-tools/eclipse/dot.project" tofile=".project" overwrite="false"/>
+    <basename file="${basedir}" property="eclipseprojectname"/>
+    <copy file="dev-tools/eclipse/dot.project" tofile=".project" overwrite="false" encoding="UTF-8">
+      <filterset>
+        <filter token="ECLIPSEPROJECTNAME" value="${eclipseprojectname}"/>
+      </filterset>
+    </copy>
     <mkdir dir=".settings"/>
     <copy todir=".settings/" overwrite="true">
       <fileset dir="dev-tools/eclipse/dot.settings" includes="*.prefs" />
@@ -199,7 +205,7 @@
     <!-- TODO: find a better way to exclude duplicate JAR files & fix the servlet-api mess! -->
     <pathconvert property="eclipse.fileset.libs" pathsep="|" dirsep="/">
       <fileset dir="${basedir}/lucene" includes="**/lib/*.jar" excludes="**/*servlet-api*.jar, analysis/uima/**, tools/**, build/**"/>
-      <fileset dir="${basedir}/solr" includes="**/lib/*.jar" excludes="core/lib/*servlet-api*.jar, contrib/analysis-extras/**, test-framework/**, build/**, dist/**, package/**" />
+      <fileset dir="${basedir}/solr" includes="**/lib/*.jar" excludes="core/lib/*servlet-api*.jar, contrib/analysis-extras/**, test-framework/lib/junit*, test-framework/lib/ant*, test-framework/lib/randomizedtesting*, build/**, dist/**, package/**" />
       <map from="${basedir}/" to=""/>
     </pathconvert>
     <xslt in="${ant.file}" out=".classpath" style="dev-tools/eclipse/dot.classpath.xsl" force="true">
diff --git a/dev-tools/eclipse/dot.project b/dev-tools/eclipse/dot.project
index e10343d..dcd7970 100644
--- a/dev-tools/eclipse/dot.project
+++ b/dev-tools/eclipse/dot.project
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-	<name>lucene_solr_trunk</name>
+	<name>@ECLIPSEPROJECTNAME@</name>
 	<comment></comment>
 	<projects>
 	</projects>
diff --git a/dev-tools/idea/.idea/libraries/Solr_test_framework_library.xml b/dev-tools/idea/.idea/libraries/Solr_test_framework_library.xml
new file mode 100644
index 0000000..bcd4c44
--- /dev/null
+++ b/dev-tools/idea/.idea/libraries/Solr_test_framework_library.xml
@@ -0,0 +1,10 @@
+<component name="libraryTable">
+  <library name="Solr test framework library">
+    <CLASSES>
+      <root url="file://$PROJECT_DIR$/solr/test-framework/lib" />
+    </CLASSES>
+    <JAVADOC />
+    <SOURCES />
+    <jarDirectory url="file://$PROJECT_DIR$/solr/test-framework/lib" recursive="false" />
+  </library>
+</component>
\ No newline at end of file
diff --git a/dev-tools/idea/solr/core/src/test/solr-core-tests.iml b/dev-tools/idea/solr/core/src/test/solr-core-tests.iml
index 6a47d93..0a49215 100644
--- a/dev-tools/idea/solr/core/src/test/solr-core-tests.iml
+++ b/dev-tools/idea/solr/core/src/test/solr-core-tests.iml
@@ -13,6 +13,7 @@
     <orderEntry type="library" scope="TEST" name="Solr core library" level="project" />
     <orderEntry type="library" scope="TEST" name="Solrj library" level="project" />
     <orderEntry type="library" scope="TEST" name="Solr example library" level="project" />
+    <orderEntry type="library" scope="TEST" name="Solr test framework library" level="project" />
     <orderEntry type="module" scope="TEST" module-name="lucene-test-framework" />
     <orderEntry type="module" scope="TEST" module-name="solr-test-framework" />
     <orderEntry type="module" scope="TEST" module-name="solr-core-test-files" />
diff --git a/dev-tools/maven/lucene/analysis/stempel/pom.xml.template b/dev-tools/maven/lucene/analysis/stempel/pom.xml.template
index 31c931d..89c77c6 100644
--- a/dev-tools/maven/lucene/analysis/stempel/pom.xml.template
+++ b/dev-tools/maven/lucene/analysis/stempel/pom.xml.template
@@ -96,6 +96,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/benchmark/pom.xml.template b/dev-tools/maven/lucene/benchmark/pom.xml.template
index 0527762..ec3eace 100755
--- a/dev-tools/maven/lucene/benchmark/pom.xml.template
+++ b/dev-tools/maven/lucene/benchmark/pom.xml.template
@@ -142,6 +142,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/core/src/java/pom.xml.template b/dev-tools/maven/lucene/core/src/java/pom.xml.template
index 700257f..d1af906 100644
--- a/dev-tools/maven/lucene/core/src/java/pom.xml.template
+++ b/dev-tools/maven/lucene/core/src/java/pom.xml.template
@@ -87,6 +87,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/demo/pom.xml.template b/dev-tools/maven/lucene/demo/pom.xml.template
index a6aef59..b46698a 100644
--- a/dev-tools/maven/lucene/demo/pom.xml.template
+++ b/dev-tools/maven/lucene/demo/pom.xml.template
@@ -113,6 +113,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/facet/pom.xml.template b/dev-tools/maven/lucene/facet/pom.xml.template
index 391ec96..6fc8872 100755
--- a/dev-tools/maven/lucene/facet/pom.xml.template
+++ b/dev-tools/maven/lucene/facet/pom.xml.template
@@ -98,6 +98,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/misc/pom.xml.template b/dev-tools/maven/lucene/misc/pom.xml.template
index 4125ea0..b816a7c 100644
--- a/dev-tools/maven/lucene/misc/pom.xml.template
+++ b/dev-tools/maven/lucene/misc/pom.xml.template
@@ -86,6 +86,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/pom.xml.template b/dev-tools/maven/lucene/pom.xml.template
index 7b6618d..cbdbf07 100644
--- a/dev-tools/maven/lucene/pom.xml.template
+++ b/dev-tools/maven/lucene/pom.xml.template
@@ -78,6 +78,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
@@ -96,6 +97,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/tests.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/queryparser/pom.xml.template b/dev-tools/maven/lucene/queryparser/pom.xml.template
index d395396..bf91090 100644
--- a/dev-tools/maven/lucene/queryparser/pom.xml.template
+++ b/dev-tools/maven/lucene/queryparser/pom.xml.template
@@ -101,6 +101,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/lucene/test-framework/pom.xml.template b/dev-tools/maven/lucene/test-framework/pom.xml.template
index 881d194..f31c728 100644
--- a/dev-tools/maven/lucene/test-framework/pom.xml.template
+++ b/dev-tools/maven/lucene/test-framework/pom.xml.template
@@ -106,6 +106,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/tests.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template
index 57ac6da..8996d73 100644
--- a/dev-tools/maven/pom.xml.template
+++ b/dev-tools/maven/pom.xml.template
@@ -47,10 +47,11 @@
     <jetty.version>8.1.10.v20130312</jetty.version>
     <slf4j.version>1.6.6</slf4j.version>
     <log4j.version>1.2.16</log4j.version>
-    <tika.version>1.3</tika.version>
+    <tika.version>1.4</tika.version>
     <httpcomponents.version>4.2.3</httpcomponents.version>
     <commons-io.version>2.1</commons-io.version>
     <restlet.version>2.1.1</restlet.version>
+    <hadoop.version>2.0.5-alpha</hadoop.version>
 
     <!-- RandomizedTesting library system properties -->
     <tests.iters>1</tests.iters>
@@ -183,6 +184,11 @@
         <version>${commons-io.version}</version>
       </dependency>
       <dependency>
+        <groupId>joda-time</groupId>
+        <artifactId>joda-time</artifactId>
+        <version>2.2</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.httpcomponents</groupId>
         <artifactId>httpclient</artifactId>
         <version>${httpcomponents.version}</version>
@@ -204,6 +210,16 @@
         <version>0.5</version>
       </dependency>
       <dependency>
+        <groupId>com.googlecode.concurrentlinkedhashmap</groupId>
+        <artifactId>concurrentlinkedhashmap-lru</artifactId>
+        <version>1.2</version>
+      </dependency>
+      <dependency>
+        <groupId>com.sun.jersey</groupId>
+        <artifactId>jersey-core</artifactId>
+        <version>1.16</version>
+      </dependency>
+      <dependency>
         <groupId>commons-lang</groupId>
         <artifactId>commons-lang</artifactId>
         <version>2.6</version>
@@ -281,6 +297,38 @@
         <version>10.9.1.0</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-annotations</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-auth</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-common</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-common</artifactId>
+        <version>${hadoop.version}</version>
+        <classifier>tests</classifier>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs</artifactId>
+        <version>${hadoop.version}</version>
+        <classifier>tests</classifier>
+      </dependency>
+      <dependency>
         <groupId>org.apache.tika</groupId>
         <artifactId>tika-core</artifactId>
         <version>${tika.version}</version>
@@ -307,8 +355,8 @@
             <artifactId>vorbis-java-core</artifactId>
           </exclusion>
           <exclusion>
-            <groupId>asm</groupId>
-            <artifactId>asm</artifactId>
+            <groupId>org.ow2.asm</groupId>
+            <artifactId>asm-debug-all</artifactId>
           </exclusion>
           <exclusion>
             <groupId>org.aspectj</groupId>
@@ -364,7 +412,7 @@
       <dependency>
         <groupId>org.carrot2</groupId>
         <artifactId>morfologik-polish</artifactId>
-        <version>1.5.5</version>
+        <version>1.6.0</version>
       </dependency>
       <dependency>
         <groupId>org.codehaus.woodstox</groupId>
@@ -398,6 +446,16 @@
         <version>${jetty.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.mortbay.jetty</groupId>
+        <artifactId>jetty</artifactId>
+        <version>6.1.26</version>
+      </dependency>
+      <dependency>
+        <groupId>org.mortbay.jetty</groupId>
+        <artifactId>jetty-util</artifactId>
+        <version>6.1.26</version>
+      </dependency>
+      <dependency>
         <groupId>org.restlet.jee</groupId>
         <artifactId>org.restlet</artifactId>
         <version>${restlet.version}</version>
@@ -510,7 +568,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
-          <version>3.0</version>
+          <version>3.1</version>
           <configuration>
             <source>${java.compat.version}</source>
             <target>${java.compat.version}</target>
@@ -519,7 +577,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-dependency-plugin</artifactId>
-          <version>2.6</version>
+          <version>2.8</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -529,7 +587,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-enforcer-plugin</artifactId>
-          <version>1.2</version>
+          <version>1.3</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -565,7 +623,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-javadoc-plugin</artifactId>
-          <version>2.9</version>
+          <version>2.9.1</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -574,8 +632,13 @@
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-site-plugin</artifactId>
+          <version>3.3</version>
+        </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
-          <version>2.13</version>
+          <version>2.15</version>
           <configuration>
             <runOrder>random</runOrder>
             <reportFormat>plain</reportFormat>
@@ -640,7 +703,7 @@
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>build-helper-maven-plugin</artifactId>
-          <version>1.7</version>
+          <version>1.8</version>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
@@ -739,7 +802,7 @@
       <plugin>
         <groupId>org.apache.felix</groupId>
         <artifactId>maven-bundle-plugin</artifactId>
-        <version>2.3.7</version>
+        <version>2.4.0</version>
         <configuration>
           <instructions>
             <Export-Package>*;-split-package:=merge-first</Export-Package>
diff --git a/dev-tools/maven/solr/core/src/java/pom.xml.template b/dev-tools/maven/solr/core/src/java/pom.xml.template
index 0bb719b..dee48c7 100644
--- a/dev-tools/maven/solr/core/src/java/pom.xml.template
+++ b/dev-tools/maven/solr/core/src/java/pom.xml.template
@@ -125,6 +125,10 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>com.googlecode.concurrentlinkedhashmap</groupId>
+      <artifactId>concurrentlinkedhashmap-lru</artifactId>
+    </dependency>
+    <dependency>
       <groupId>commons-codec</groupId>
       <artifactId>commons-codec</artifactId>
     </dependency>
@@ -137,6 +141,146 @@
       <artifactId>commons-fileupload</artifactId>
     </dependency>
     <dependency>
+      <groupId>joda-time</groupId>
+      <artifactId>joda-time</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-math</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xmlenc</groupId>
+          <artifactId>xmlenc</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-net</groupId>
+          <artifactId>commons-net</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-digester</groupId>
+          <artifactId>commons-digester</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-beanutils</groupId>
+          <artifactId>commons-beanutils-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-core-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-mapper-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.sf.kosmosfs</groupId>
+          <artifactId>kfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.jcraft</groupId>
+          <artifactId>jsch</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-core-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-mapper-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xmlenc</groupId>
+          <artifactId>xmlenc</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-daemon</groupId>
+          <artifactId>commons-daemon</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
       <groupId>org.restlet.jee</groupId>
       <artifactId>org.restlet</artifactId>
       <version>${restlet.version}</version>
@@ -249,6 +393,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/servlet-api.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/solr/core/src/test/pom.xml.template b/dev-tools/maven/solr/core/src/test/pom.xml.template
index a9647db..5e09174 100644
--- a/dev-tools/maven/solr/core/src/test/pom.xml.template
+++ b/dev-tools/maven/solr/core/src/test/pom.xml.template
@@ -138,6 +138,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/servlet-api.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/solr/pom.xml.template b/dev-tools/maven/solr/pom.xml.template
index a44e7f1..51bbab0 100644
--- a/dev-tools/maven/solr/pom.xml.template
+++ b/dev-tools/maven/solr/pom.xml.template
@@ -34,7 +34,6 @@
   <modules>
     <module>core</module>
     <module>solrj</module>
-    <module>webapp</module>
     <module>test-framework</module>
     <module>contrib</module>
   </modules>
@@ -149,6 +148,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
@@ -169,6 +169,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/tests.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/solr/solrj/src/java/pom.xml.template b/dev-tools/maven/solr/solrj/src/java/pom.xml.template
index 8405328..5992d60 100644
--- a/dev-tools/maven/solr/solrj/src/java/pom.xml.template
+++ b/dev-tools/maven/solr/solrj/src/java/pom.xml.template
@@ -155,6 +155,7 @@
               </bundledSignatures>
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
                 <!-- Solr-J does NOT depend on servlet-api -->
                 <!-- <signaturesFile>${top-level}/lucene/tools/forbiddenApis/servlet-api.txt</signaturesFile> -->
               </signaturesFiles>
diff --git a/dev-tools/maven/solr/solrj/src/test/pom.xml.template b/dev-tools/maven/solr/solrj/src/test/pom.xml.template
index 2b8d652..f0d0ebf 100644
--- a/dev-tools/maven/solr/solrj/src/test/pom.xml.template
+++ b/dev-tools/maven/solr/solrj/src/test/pom.xml.template
@@ -142,6 +142,7 @@
               <signaturesFiles>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/tests.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/solr/test-framework/pom.xml.template b/dev-tools/maven/solr/test-framework/pom.xml.template
index eb9bc00..5e3b537 100644
--- a/dev-tools/maven/solr/test-framework/pom.xml.template
+++ b/dev-tools/maven/solr/test-framework/pom.xml.template
@@ -65,6 +65,128 @@
       <artifactId>junit</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <classifier>tests</classifier>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-math</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xmlenc</groupId>
+          <artifactId>xmlenc</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-net</groupId>
+          <artifactId>commons-net</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-digester</groupId>
+          <artifactId>commons-digester</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-core-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-mapper-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.sf.kosmosfs</groupId>
+          <artifactId>kfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.jcraft</groupId>
+          <artifactId>jsch</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-beanutils</groupId>
+          <artifactId>commons-beanutils-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <classifier>tests</classifier>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-core-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-mapper-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xmlenc</groupId>
+          <artifactId>xmlenc</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-daemon</groupId>
+          <artifactId>commons-daemon</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-servlet</artifactId>
     </dependency>
@@ -72,6 +194,23 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
     </dependency>
+
+    <!-- Jetty 6 required for Hadoop MiniDFSCluster -->
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-util</artifactId>
+    </dependency>
+
     <!-- If your tests don't use BaseDistributedSearchTestCase or SolrJettyTestBase,
          you can exclude the two Jetty dependencies below. -->
     <dependency>
@@ -130,6 +269,7 @@
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/servlet-api.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/tests.txt</signaturesFile>
                 <signaturesFile>${top-level}/lucene/tools/forbiddenApis/executors.txt</signaturesFile>
+                <signaturesFile>${top-level}/lucene/tools/forbiddenApis/chars.txt</signaturesFile>
               </signaturesFiles>
             </configuration>
             <goals>
diff --git a/dev-tools/maven/solr/webapp/pom.xml.template b/dev-tools/maven/solr/webapp/pom.xml.template
deleted file mode 100644
index e86a54a..0000000
--- a/dev-tools/maven/solr/webapp/pom.xml.template
+++ /dev/null
@@ -1,124 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <!--
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-  -->
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.solr</groupId>
-    <artifactId>solr-parent</artifactId>
-    <version>@version@</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-  <groupId>org.apache.solr</groupId>
-  <artifactId>solr</artifactId>
-  <packaging>war</packaging>
-  <name>Apache Solr Search Server</name>
-  <description>Apache Solr Search Server</description>
-  <properties>
-    <module-directory>solr/webapp</module-directory>
-    <relative-top-level>../../..</relative-top-level>
-    <module-path>${relative-top-level}/${module-directory}</module-path>
-  </properties>
-  <scm>
-    <connection>scm:svn:${vc-anonymous-base-url}/${module-directory}</connection>
-    <developerConnection>scm:svn:${vc-dev-base-url}/${module-directory}</developerConnection>
-    <url>${vc-browse-base-url}/${module-directory}</url>
-  </scm>
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>solr-core</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>solr-solrj</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.eclipse.jetty.orbit</groupId>
-      <artifactId>javax.servlet</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <directory>${build-directory}</directory>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <configuration>
-          <skip>true</skip> <!-- There are no public or protected classes -->
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-        <configuration>
-          <!-- There are no sources for the Solr WAR, but    -->
-          <!-- the maven-source-plugin has no "skip" option. -->
-          <!-- Setting attach=false prevents the built jar   -->
-          <!-- from being installed or deployed.             -->
-          <attach>false</attach>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-war-plugin</artifactId>
-        <configuration>
-          <warSourceDirectory>${module-path}/web</warSourceDirectory>
-          <webXml>${module-path}/web/WEB-INF/web.xml</webXml>
-        </configuration>
-      </plugin>
-      <plugin>
-        <!-- http://wiki.eclipse.org/Jetty/Feature/Jetty_Maven_Plugin -->
-        <groupId>org.mortbay.jetty</groupId>
-        <artifactId>jetty-maven-plugin</artifactId>
-        <configuration>
-          <scanIntervalSeconds>10</scanIntervalSeconds>
-          <webAppConfig>
-            <contextPath>/solr</contextPath>
-          </webAppConfig>
-          <webAppSourceDirectory>${module-path}/web</webAppSourceDirectory>
-          <systemProperties>
-            <systemProperty>
-              <name>solr.solr.home</name>
-              <value>${top-level}/solr/example/solr</value>
-            </systemProperty>
-          </systemProperties>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>de.thetaphi</groupId>
-        <artifactId>forbiddenapis</artifactId>
-        <executions>
-          <execution>
-            <id>solr-shared-check-forbidden-apis</id>
-            <phase>none</phase> <!-- Block inherited execution -->
-          </execution>
-          <execution>
-            <id>solr-shared-test-check-forbidden-apis</id>
-            <phase>none</phase> <!-- Block inherited execution -->
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/dev-tools/scripts/buildAndPushRelease.py b/dev-tools/scripts/buildAndPushRelease.py
index 82b046d..af4d7b9 100644
--- a/dev-tools/scripts/buildAndPushRelease.py
+++ b/dev-tools/scripts/buildAndPushRelease.py
@@ -46,9 +46,10 @@
     raise RuntimeError(msg)
 
 def runAndSendGPGPassword(command, password):
-  p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
+  p = subprocess.Popen(command, shell=True, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
   f = open(LOG, 'ab')
   while True:
+    p.stdout.flush()
     line = p.stdout.readline()
     if len(line) == 0:
       break
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index f1ce9c8..b47400c 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -970,10 +970,6 @@
     print('    unpack %s...' % distribution)
     unpackLogFile = '%s/unpack-%s-maven-checks.log' % (tmpDir, distribution)
     run('tar xzf %s/%s' % (tmpDir, distribution), unpackLogFile)
-    if project == 'solr': # unpack the Solr war
-      unpackLogFile = '%s/unpack-solr-war-maven-checks.log' % tmpDir
-      print('        unpack Solr war...')
-      run('jar xvf */dist/*.war', unpackLogFile)
     distributionFiles[project] = []
     for root, dirs, files in os.walk(destDir):
       distributionFiles[project].extend([os.path.join(root, file) for file in files])
@@ -1309,7 +1305,7 @@
 
   if len(sys.argv) < 5:
     print()
-    print('Usage python -u %s BaseURL SvnRevision version tmpDir [ isSigned ] [ -testArgs "-Dwhat=ever [ ... ]" ]'
+    print('Usage python -u %s BaseURL SvnRevision version tmpDir [ isSigned(True|False) ] [ -testArgs "-Dwhat=ever [ ... ]" ]'
           % sys.argv[0])
     print()
     print('  example: python3.2 -u dev-tools/scripts/smokeTestRelease.py http://people.apache.org/~whoever/staging_area/lucene-solr-4.3.0-RC1-rev1469340 1469340 4.3.0 /path/to/a/tmp/dir')
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e90e567..1e7c11c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -23,11 +23,19 @@
   not positioned. This change affects all classes that inherit from
   DocIdSetIterator, including DocsEnum and DocsAndPositionsEnum. (Adrien Grand)
 
+* LUCENE-5089: Update to Morfologik 1.6.0. MorfologikAnalyzer and MorfologikFilter 
+  no longer support multiple "dictionaries" as there is only one dictionary available.
+  (Dawid Weiss)
+
 New Features
 
 * LUCENE-4747: Move to Java 7 as minimum Java version.
   (Robert Muir, Uwe Schindler)
 
+
 Optimizations
 
 * LUCENE-4848: Use Java 7 NIO2-FileChannel instead of RandomAccessFile
@@ -39,6 +47,9 @@
 
 Changes in backwards compatibility policy
 
+* LUCENE-5085: MorfologikFilter will no longer stem words marked as keywords
+  (Dawid Weiss, Grzegorz Sobczyk)
+
 * LUCENE-4955: NGramTokenFilter now emits all n-grams for the same token at the
   same position and preserves the position length and the offsets of the
   original token. (Simon Willnauer, Adrien Grand)
@@ -47,6 +58,10 @@
   (a, ab, b, bc, c) instead of (a, b, c, ab, bc) and doesn't trim trailing
   whitespaces. (Adrien Grand)
 
+* LUCENE-5042: The n-gram and edge n-gram tokenizers and filters now correctly
+  handle supplementary characters, and the tokenizers have the ability to
+  pre-tokenize the input stream similarly to CharTokenizer. (Adrien Grand)
+
 * LUCENE-4967: NRTManager is replaced by
   ControlledRealTimeReopenThread, for controlling which requests must
   see which indexing changes, so that it can work with any
@@ -90,8 +105,31 @@
   categories. You should set TakmiSampleFixer on SamplingParams if required (but 
   notice that this means slower search). (Rob Audenaerde, Gilad Barkai, Shai Erera)
 
+* LUCENE-4933: Replace ExactSimScorer/SloppySimScorer with just SimScorer. Previously
+  there were 2 implementations as a performance hack to support tableization of
+  sqrt(), but this caching is removed, as sqrt is implemented in hardware with modern
+  JVMs and it's faster not to cache.  (Robert Muir)
+
+* LUCENE-5038: MergePolicy now has a default implementation for useCompoundFile based
+  on segment size and noCFSRatio. The default implementation was pulled up from
+  TieredMergePolicy. (Simon Willnauer)
+
+* LUCENE-5063: FieldCache.get(Bytes|Shorts), SortField.Type.(BYTE|SHORT) and
+  FieldCache.DEFAULT_(BYTE|SHORT|INT|LONG|FLOAT|DOUBLE)_PARSER are now
+  deprecated. These methods/types assume that data is stored as strings although
+  Lucene has much better support for numeric data through (Int|Long)Field,
+  NumericRangeQuery and FieldCache.get(Int|Long)s. (Adrien Grand)
+
+* LUCENE-5078: TfIDFSimilarity lets you encode the norm value as any arbitrary long.
+  As a result, encode/decodeNormValue were made abstract with their signatures changed.
+  The default implementation was moved to DefaultSimilarity, which encodes the norm as
+  a single-byte value. (Shai Erera)
+
 Bug Fixes
 
+* LUCENE-4890: QueryTreeBuilder.getBuilder() only finds interfaces on the 
+  most derived class. (Adriano Crestani)
+
 * LUCENE-4997: Internal test framework's tests are sensitive to previous 
   test failures and tests.failfast. (Dawid Weiss, Shai Erera)
 
@@ -123,6 +161,48 @@
   some readers did not have the requested numeric DV field.
   (Rob Audenaerde, Shai Erera)
 
+* LUCENE-5028: Remove pointless and confusing doShare option in FST's
+  PositiveIntOutputs (Han Jiang via Mike McCandless)
+
+* LUCENE-5032: Fix IndexOutOfBoundsExc in PostingsHighlighter when
+  multi-valued fields exceed maxLength (Tomás Fernández Löbbe
+  via Mike McCandless)
+
+* LUCENE-4933: SweetSpotSimilarity didn't apply its tf function to some
+  queries (SloppyPhraseQuery, SpanQueries).  (Robert Muir)
+
+* LUCENE-5033: SlowFuzzyQuery was accepting too many terms (documents) when
+  the provided minSimilarity is an int > 1 (Tim Allison via Mike McCandless)
+
+* LUCENE-5045: DrillSideways.search did not work on an empty index. (Shai Erera)
+
+* LUCENE-4995: CompressingStoredFieldsReader now only reuses an internal buffer
+  when there is no more than 32kb to decompress. This prevents running
+  into out-of-memory errors when working with large stored fields.
+  (Adrien Grand)
+
+* LUCENE-5048: CategoryPath with a long path could result in hitting 
+  NegativeArraySizeException, categories being added multiple times to the 
+  taxonomy or drill-down terms silently discarded by the indexer. CategoryPath 
+  is now limited to MAX_CATEGORY_PATH_LENGTH characters.
+  (Colton Jamieson, Mike McCandless, Shai Erera)
+
+* LUCENE-5062: If the spatial data for a document was composed of multiple
+  overlapping or adjacent parts then a CONTAINS predicate query might not match
+  when the sum of those shapes contains the query shape but none does individually.
+  A flag was added to use the original faster algorithm. (David Smiley)
+
+* LUCENE-4971: Fixed NPE in AnalyzingSuggester when there are too many
+  graph expansions.  (Alexey Kudinov via Mike McCandless)
+
+* LUCENE-5080: Combined setMaxMergeCount and setMaxThreadCount into one
+  setter in ConcurrentMergeScheduler: setMaxMergesAndThreads.  Previously these
+  setters would not work unless you invoked them very carefully.
+  (Robert Muir, Shai Erera)
+  
+* LUCENE-5068: QueryParserUtil.escape() does not escape forward slash.
+  (Matias Holte via Steve Rowe)
+
 Optimizations
 
 * LUCENE-4936: Improve numeric doc values compression in case all values share
@@ -137,8 +217,23 @@
   single snapshots_N file, and no longer requires closing (Mike
   McCandless, Shai Erera)
 
+* LUCENE-5035: Compress addresses in FieldCacheImpl.SortedDocValuesImpl more
+  efficiently. (Adrien Grand, Robert Muir)
+
+* LUCENE-4941: Sort "from" terms only once when using JoinUtil.
+  (Martijn van Groningen)
+
+* LUCENE-5050: Close the stored fields and term vectors index files as soon as
+  the index has been loaded into memory to save file descriptors. (Adrien Grand)
+
 New Features
 
+* LUCENE-5064: Added PagedMutable (internal), a paged extension of
+  PackedInts.Mutable which allows for storing more than 2B values. (Adrien Grand)
+
 * LUCENE-4766: Added a PatternCaptureGroupTokenFilter that uses Java regexes to 
   emit multiple tokens, one for each capture group in one or more patterns.
   (Simon Willnauer, Clinton Gormley)
@@ -169,6 +264,37 @@
 * LUCENE-5022: Added FacetResult.mergeHierarchies to merge multiple
   FacetResult of the same dimension into a single one with the reconstructed
   hierarchy. (Shai Erera)
+
+* LUCENE-5026: Added PagedGrowableWriter, a new internal packed-ints structure
+  that grows the number of bits per value on demand, can store more than 2B
+  values and supports random write and read access. (Adrien Grand)
+
+* LUCENE-5025: FST's Builder can now handle more than 2.1 billion
+  "tail nodes" while building a minimal FST.  (Aaron Binns, Adrien
+  Grand, Mike McCandless)
+
+* LUCENE-5063: FieldCache.DEFAULT.get(Ints|Longs) now uses bit-packing to save
+  memory. (Adrien Grand)
+
+* LUCENE-5079: IndexWriter.hasUncommittedChanges() returns true if there are
+  changes that have not been committed. (yonik, Mike McCandless, Uwe Schindler)
+
+* SOLR-4565: Extend NorwegianLightStemFilter and NorwegianMinimalStemFilter 
+  to handle "nynorsk" (Erlend Garåsen, janhoy via Robert Muir)
+
+* LUCENE-5087: Add getMultiValuedSeparator to PostingsHighlighter, for cases
+  where you want a different logical separator between field values. This can
+  be set to e.g. U+2029 PARAGRAPH SEPARATOR if you never want passages to span
+  values. (Mike McCandless, Robert Muir)
+
+* LUCENE-5013: Added ScandinavianFoldingFilterFactory and
+  ScandinavianNormalizationFilterFactory (Karl Wettin via janhoy)
+
+API Changes
+
+* LUCENE-5077: Make it easier to use compressed norms. Lucene42NormsFormat takes
+  an overhead parameter, so you can easily pass a value other than
+  PackedInts.FASTEST from your own codec.  (Robert Muir)
   
 Build
 
@@ -176,12 +302,31 @@
   Test framework may fail internally due to overly aggressive J9 optimizations.
   (Dawid Weiss, Shai Erera)
 
+* LUCENE-5043: The eclipse target now uses the containing directory for the
+  project name.  This also enforces UTF-8 encoding when files are copied with
+  filtering.
+
+* LUCENE-5055: "rat-sources" target now checks also build.xml, ivy.xml,
+  forbidden-api signatures, and parts of resources folders.  (Ryan Ernst,
+  Uwe Schindler)
+
+* LUCENE-5072: Automatically patch javadocs generated by JDK versions
+  before 7u25 to work around the frame injection vulnerability (CVE-2013-1571,
+  VU#225657).  (Uwe Schindler)
+
 Tests
 
 * LUCENE-4901: TestIndexWriterOnJRECrash should work on any 
   JRE vendor via Runtime.halt().
   (Mike McCandless, Robert Muir, Uwe Schindler, Rodrigo Trujillo, Dawid Weiss)
 
+Changes in runtime behavior
+
+* LUCENE-5038: New segments written by IndexWriter are now wrapped into CFS
+  by default. DocumentsWriterPerThread doesn't consult MergePolicy anymore 
+  to decide if a CFS must be written, instead IndexWriterConfig now has a
+  property to enable / disable CFS for newly created segments. (Simon Willnauer)
+
 ======================= Lucene 4.3.1 =======================
 
 Bug Fixes
diff --git a/lucene/analysis/common/build.xml b/lucene/analysis/common/build.xml
index d9f4889..75de0e7 100644
--- a/lucene/analysis/common/build.xml
+++ b/lucene/analysis/common/build.xml
@@ -25,6 +25,7 @@
 
   <!-- some files for testing that do not have license headers -->
   <property name="rat.excludes" value="**/*.aff,**/*.dic,**/*.txt,**/charfilter/*.htm*,**/*LuceneResourcesWikiPage.html"/>
+  <property name="rat.additional-includes" value="src/tools/**"/>
 
   <import file="../analysis-module-build.xml"/>
 	
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilter.java
index 01c537b..ba0a20a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilter.java
@@ -57,7 +57,7 @@
       int chLen = termAtt.length();
       for (int i = 0; i < chLen;) {
         i += Character.toChars(
-            lowerCase(charUtils.codePointAt(chArray, i)), chArray, i);
+            lowerCase(charUtils.codePointAt(chArray, i, chLen)), chArray, i);
        }
       return true;
     } else {
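
The added chLen bound above is the point of this change: without a limit, a code-point read at the end of a term can pair a trailing high surrogate with stale characters left in the shared buffer beyond the term's length. A minimal sketch of the difference using plain java.lang.Character, whose bounded overload is assumed here to match the contract of the CharacterUtils.codePointAt overload in the patch:

import static java.lang.Character.codePointAt;

public class BoundedCodePointDemo {
  public static void main(String[] args) {
    char[] buf = new char[8];
    // Fill buf[0..2] with "a" + U+1D11E (a surrogate pair), then pretend the
    // current token was truncated to length 2: 'a' plus a lone high surrogate.
    "a\uD834\uDD1E".getChars(0, 3, buf, 0);
    int length = 2;

    // Unbounded read pairs buf[1] with the stale low surrogate at buf[2]:
    System.out.println(Integer.toHexString(codePointAt(buf, 1)));         // 1d11e
    // Bounded read stops at 'length' and returns the lone surrogate itself:
    System.out.println(Integer.toHexString(codePointAt(buf, 1, length))); // d834
  }
}
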
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java
index 7abf1b0..ccb53f5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java
@@ -378,17 +378,14 @@
         wordForm = new HunspellWord(flagParsingStrategy.parseFlags(flagPart));
         Arrays.sort(wordForm.getFlags());
         entry = line.substring(0, flagSep);
-        if(ignoreCase) {
-          entry = entry.toLowerCase(Locale.ROOT);
-        }
       }
-      
-      List<HunspellWord> entries = words.get(entry);
-      if (entries == null) {
-        entries = new ArrayList<HunspellWord>();
-        words.put(entry, entries);
+      if(ignoreCase) {
+        entry = entry.toLowerCase(Locale.ROOT);
       }
+
+      List<HunspellWord> entries = new ArrayList<HunspellWord>();
       entries.add(wordForm);
+      words.put(entry, entries);
     }
   }
 
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java
new file mode 100644
index 0000000..05a5f5a
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.java
@@ -0,0 +1,138 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.util.StemmerUtil;
+
+import java.io.IOException;
+
+/**
+ * This filter folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o.
+ * It also discriminates against the use of double vowels aa, ae, ao, oe and oo, leaving just the first one.
+ * <p/>
+ * It is a semantically more destructive solution than {@link ScandinavianNormalizationFilter} but
+ * can in addition help with matching raksmorgas as räksmörgås.
+ * <p/>
+ * blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej == blabarsyltetoj
+ * räksmörgås == ræksmørgås == ræksmörgaos == raeksmoergaas == raksmorgas
+ * <p/>
+ * Background:
+ * Swedish åäö are in fact the same letters as Norwegian and Danish åæø and thus interchangeable
+ * when used between these languages. They are however folded differently when people type
+ * them on a keyboard lacking these characters.
+ * <p/>
+ * In that situation almost all Swedish people use a, a, o instead of å, ä, ö.
+ * <p/>
+ * Norwegians and Danes on the other hand usually type aa, ae and oe instead of å, æ and ø.
+ * Some do however use a, a, o, oo, ao and sometimes permutations of everything above.
+ * <p/>
+ * This filter solves that mismatch problem, but might also cause new ones.
+ * <p/>
+ * @see ScandinavianNormalizationFilter
+ */
+public final class ScandinavianFoldingFilter extends TokenFilter {
+
+  public ScandinavianFoldingFilter(TokenStream input) {
+    super(input);
+  }
+
+  private final CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);
+
+  private static final char AA = '\u00C5'; // Å
+  private static final char aa = '\u00E5'; // å
+  private static final char AE = '\u00C6'; // Æ
+  private static final char ae = '\u00E6'; // æ
+  private static final char AE_se = '\u00C4'; // Ä
+  private static final char ae_se = '\u00E4'; // ä
+  private static final char OE = '\u00D8'; // Ø
+  private static final char oe = '\u00F8'; // ø
+  private static final char OE_se = '\u00D6'; // Ö
+  private static final char oe_se = '\u00F6'; // ö
+
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (!input.incrementToken()) {
+      return false;
+    }
+
+    char[] buffer = charTermAttribute.buffer();
+    int length = charTermAttribute.length();
+
+
+    int i;
+    for (i = 0; i < length; i++) {
+
+      if (buffer[i] == aa
+          || buffer[i] == ae_se
+          || buffer[i] == ae) {
+
+        buffer[i] = 'a';
+
+      } else if (buffer[i] == AA
+          || buffer[i] == AE_se
+          || buffer[i] == AE) {
+
+        buffer[i] = 'A';
+
+      } else if (buffer[i] == oe
+          || buffer[i] == oe_se) {
+
+        buffer[i] = 'o';
+
+      } else if (buffer[i] == OE
+          || buffer[i] == OE_se) {
+
+        buffer[i] = 'O';
+
+      } else if (length - 1 > i) {
+
+        if ((buffer[i] == 'a' || buffer[i] == 'A')
+            && (buffer[i + 1] == 'a'
+            || buffer[i + 1] == 'A'
+            || buffer[i + 1] == 'e'
+            || buffer[i + 1] == 'E'
+            || buffer[i + 1] == 'o'
+            || buffer[i + 1] == 'O')
+            ) {
+
+          length = StemmerUtil.delete(buffer, i + 1, length);
+
+        } else if ((buffer[i] == 'o' || buffer[i] == 'O')
+            && (buffer[i + 1] == 'e'
+            || buffer[i + 1] == 'E'
+            || buffer[i + 1] == 'o'
+            || buffer[i + 1] == 'O')
+            ) {
+
+          length = StemmerUtil.delete(buffer, i + 1, length);
+
+        }
+      }
+    }
+
+    charTermAttribute.setLength(length);
+
+
+    return true;
+  }
+
+}
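A minimal consumption sketch for the new filter, assuming the Lucene 4.4-era WhitespaceTokenizer(Version, Reader) constructor; folding collapses räksmörgås to raksmorgas:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class ScandinavianFoldingDemo {
  public static void main(String[] args) throws Exception {
    // Tokenize on whitespace, then fold Scandinavian characters.
    TokenStream ts = new ScandinavianFoldingFilter(
        new WhitespaceTokenizer(Version.LUCENE_44, new StringReader("räksmörgås")));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term); // raksmorgas
    }
    ts.end();
    ts.close();
  }
}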
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java
new file mode 100644
index 0000000..ffde246
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilterFactory.java
@@ -0,0 +1,48 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+
+import java.util.Map;
+
+/**
+ * Factory for {@link ScandinavianFoldingFilter}.
+ * <pre class="prettyprint">
+ * &lt;fieldType name="text_scandfold" class="solr.TextField" positionIncrementGap="100"&gt;
+ *   &lt;analyzer&gt;
+ *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
+ *     &lt;filter class="solr.ScandinavianFoldingFilterFactory"/&gt;
+ *   &lt;/analyzer&gt;
+ * &lt;/fieldType&gt;</pre>
+ */
+public class ScandinavianFoldingFilterFactory extends TokenFilterFactory {
+
+  public ScandinavianFoldingFilterFactory(Map<String,String> args) {
+    super(args);
+    if (!args.isEmpty()) {
+      throw new IllegalArgumentException("Unknown parameters: " + args);
+    }
+  }
+
+  @Override
+  public ScandinavianFoldingFilter create(TokenStream input) {
+    return new ScandinavianFoldingFilter(input);
+  }
+}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java
new file mode 100644
index 0000000..3da0034
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.java
@@ -0,0 +1,122 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.util.StemmerUtil;
+
+import java.io.IOException;
+
+/**
+ * This filter normalizes the use of the interchangeable Scandinavian characters æÆäÄöÖøØ
+ * and folded variants (aa, ao, ae, oe and oo) by transforming them to åÅæÆøØ.
+ * <p/>
+ * It's a semantically less destructive solution than {@link ScandinavianFoldingFilter},
+ * most useful when a person with a Norwegian or Danish keyboard queries a Swedish index
+ * and vice versa. This filter does <b>not</b> perform the common Swedish folds of å and ä to a nor ö to o.
+ * <p/>
+ * blåbærsyltetøj == blåbärsyltetöj == blaabaarsyltetoej but not blabarsyltetoj
+ * räksmörgås == ræksmørgås == ræksmörgaos == raeksmoergaas but not raksmorgas
+ * <p/>
+ * @see ScandinavianFoldingFilter
+ */
+public final class ScandinavianNormalizationFilter extends TokenFilter {
+
+  public ScandinavianNormalizationFilter(TokenStream input) {
+    super(input);
+  }
+
+  private final CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);
+
+  private static final char AA = '\u00C5'; // Å
+  private static final char aa = '\u00E5'; // å
+  private static final char AE = '\u00C6'; // Æ
+  private static final char ae = '\u00E6'; // æ
+  private static final char AE_se = '\u00C4'; // Ä
+  private static final char ae_se = '\u00E4'; // ä
+  private static final char OE = '\u00D8'; // Ø
+  private static final char oe = '\u00F8'; // ø
+  private static final char OE_se = '\u00D6'; // Ö
+  private static final char oe_se = '\u00F6'; //ö
+
+
+  @Override
+  public boolean incrementToken() throws IOException {
+    if (!input.incrementToken()) {
+      return false;
+    }
+
+    char[] buffer = charTermAttribute.buffer();
+    int length = charTermAttribute.length();
+
+
+    int i;
+    for (i = 0; i < length; i++) {
+
+      if (buffer[i] == ae_se) {
+        buffer[i] = ae;
+
+      } else if (buffer[i] == AE_se) {
+        buffer[i] = AE;
+
+      } else if (buffer[i] == oe_se) {
+        buffer[i] = oe;
+
+      } else if (buffer[i] == OE_se) {
+        buffer[i] = OE;
+
+      } else if (length - 1 > i) {
+
+        if (buffer[i] == 'a' && (buffer[i + 1] == 'a' || buffer[i + 1] == 'o' || buffer[i + 1] == 'A' || buffer[i + 1] == 'O')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = aa;
+
+        } else if (buffer[i] == 'A' && (buffer[i + 1] == 'a' || buffer[i + 1] == 'A' || buffer[i + 1] == 'o' || buffer[i + 1] == 'O')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = AA;
+
+        } else if (buffer[i] == 'a' && (buffer[i + 1] == 'e' || buffer[i + 1] == 'E')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = ae;
+
+        } else if (buffer[i] == 'A' && (buffer[i + 1] == 'e' || buffer[i + 1] == 'E')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = AE;
+
+        } else if (buffer[i] == 'o' && (buffer[i + 1] == 'e' || buffer[i + 1] == 'E' || buffer[i + 1] == 'o' || buffer[i + 1] == 'O')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = oe;
+
+        } else if (buffer[i] == 'O' && (buffer[i + 1] == 'e' || buffer[i + 1] == 'E' || buffer[i + 1] == 'o' || buffer[i + 1] == 'O')) {
+          length = StemmerUtil.delete(buffer, i + 1, length);
+          buffer[i] = OE;
+
+        }
+
+      }
+    }
+
+    charTermAttribute.setLength(length);
+
+
+    return true;
+  }
+
+}
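The same consumption pattern shows the normalization direction; assuming the 4.x KeywordTokenizer(Reader) so the whole input stays one token, the Danish/Norwegian-style folded spelling is mapped back to native characters:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ScandinavianNormalizationDemo {
  public static void main(String[] args) throws Exception {
    // aa -> å, ae -> æ, oe -> ø, as implemented above.
    TokenStream ts = new ScandinavianNormalizationFilter(
        new KeywordTokenizer(new StringReader("blaabaersyltetoej")));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term); // blåbærsyltetøj
    }
    ts.end();
    ts.close();
  }
}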
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java
new file mode 100644
index 0000000..01ef9e2
--- /dev/null
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilterFactory.java
@@ -0,0 +1,48 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+
+import java.util.Map;
+
+/**
+ * Factory for {@link org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter}.
+ * <pre class="prettyprint">
+ * &lt;fieldType name="text_scandnorm" class="solr.TextField" positionIncrementGap="100"&gt;
+ *   &lt;analyzer&gt;
+ *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
+ *     &lt;filter class="solr.ScandinavianNormalizationFilterFactory"/&gt;
+ *   &lt;/analyzer&gt;
+ * &lt;/fieldType&gt;</pre>
+ */
+public class ScandinavianNormalizationFilterFactory extends TokenFilterFactory {
+
+  public ScandinavianNormalizationFilterFactory(Map<String, String> args) {
+    super(args);
+    if (!args.isEmpty()) {
+      throw new IllegalArgumentException("Unknown parameters: " + args);
+    }
+  }
+
+  @Override
+  public ScandinavianNormalizationFilter create(TokenStream input) {
+    return new ScandinavianNormalizationFilter(input);
+  }
+}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
index 7ef82ad..c5013c5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
@@ -25,21 +25,26 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.util.CharacterUtils;
 import org.apache.lucene.util.Version;
 
 /**
  * Tokenizes the given token into n-grams of given size(s).
  * <p>
  * This {@link TokenFilter} create n-grams from the beginning edge of a input token.
+ * <p><a name="match_version" />As of Lucene 4.4, this filter handles correctly
+ * supplementary characters.
  */
 public final class EdgeNGramTokenFilter extends TokenFilter {
   public static final int DEFAULT_MAX_GRAM_SIZE = 1;
   public static final int DEFAULT_MIN_GRAM_SIZE = 1;
 
+  private final CharacterUtils charUtils;
   private final int minGram;
   private final int maxGram;
   private char[] curTermBuffer;
   private int curTermLength;
+  private int curCodePointCount;
   private int curGramSize;
   private int tokStart;
   private int tokEnd; // only used if the length changed before this filter
@@ -74,6 +79,9 @@
       throw new IllegalArgumentException("minGram must not be greater than maxGram");
     }
 
+    this.charUtils = version.onOrAfter(Version.LUCENE_44)
+        ? CharacterUtils.getInstance(version)
+        : CharacterUtils.getJava4Instance();
     this.minGram = minGram;
     this.maxGram = maxGram;
   }
@@ -87,6 +95,7 @@
         } else {
           curTermBuffer = termAtt.buffer().clone();
           curTermLength = termAtt.length();
+          curCodePointCount = charUtils.codePointCount(termAtt);
           curGramSize = minGram;
           tokStart = offsetAtt.startOffset();
           tokEnd = offsetAtt.endOffset();
@@ -95,7 +104,7 @@
         }
       }
       if (curGramSize <= maxGram) {         // if we have hit the end of our n-gram size range, quit
-        if (curGramSize <= curTermLength) { // if the remaining input is too short, we can't generate any n-grams
+        if (curGramSize <= curCodePointCount) { // if the remaining input is too short, we can't generate any n-grams
           // grab gramSize chars from front or back
           clearAttributes();
           offsetAtt.setOffset(tokStart, tokEnd);
@@ -107,7 +116,8 @@
             posIncrAtt.setPositionIncrement(0);
           }
           posLenAtt.setPositionLength(savePosLen);
-          termAtt.copyBuffer(curTermBuffer, 0, curGramSize);
+          final int charLength = charUtils.offsetByCodePoints(curTermBuffer, 0, curTermLength, 0, curGramSize);
+          termAtt.copyBuffer(curTermBuffer, 0, charLength);
           curGramSize++;
           return true;
         }
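The char-versus-code-point distinction behind this change can be illustrated with plain java.lang.String methods: 𐐷 (U+10437) occupies two Java chars, so char-based gram sizes could split its surrogate pair, while the code-point arithmetic used above cannot:

public class SupplementaryCharDemo {
  public static void main(String[] args) {
    String term = "a\uD801\uDC37b"; // "a𐐷b": 4 chars, 3 code points
    System.out.println(term.length());                         // 4
    System.out.println(term.codePointCount(0, term.length())); // 3
    // Char length of the first 2 code points ("a𐐷"): a 2-gram must copy
    // 3 chars to avoid emitting a lone surrogate.
    System.out.println(term.offsetByCodePoints(0, 2));         // 3
  }
}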
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
index e41d940..deb16e6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
@@ -17,37 +17,23 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
 
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Version;
 
 /**
  * Tokenizes the input from an edge into n-grams of given size(s).
  * <p>
  * This {@link Tokenizer} create n-grams from the beginning edge of a input token.
+ * <p><a name="match_version" />As of Lucene 4.4, this class supports
+ * {@link #isTokenChar(int) pre-tokenization} and correctly handles
+ * supplementary characters.
  */
-public final class EdgeNGramTokenizer extends Tokenizer {
+public class EdgeNGramTokenizer extends NGramTokenizer {
   public static final int DEFAULT_MAX_GRAM_SIZE = 1;
   public static final int DEFAULT_MIN_GRAM_SIZE = 1;
 
-  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
-
-  private int minGram;
-  private int maxGram;
-  private int gramSize;
-  private boolean started;
-  private int inLen; // length of the input AFTER trim()
-  private int charsRead; // length of the input
-  private String inStr;
-
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
@@ -57,8 +43,7 @@
    * @param maxGram the largest n-gram to generate
    */
   public EdgeNGramTokenizer(Version version, Reader input, int minGram, int maxGram) {
-    super(input);
-    init(version, minGram, maxGram);
+    super(version, input, minGram, maxGram, true);
   }
 
   /**
@@ -71,102 +56,7 @@
    * @param maxGram the largest n-gram to generate
    */
   public EdgeNGramTokenizer(Version version, AttributeFactory factory, Reader input, int minGram, int maxGram) {
-    super(factory, input);
-    init(version, minGram, maxGram);
+    super(version, factory, input, minGram, maxGram, true);
   }
 
-  private void init(Version version, int minGram, int maxGram) {
-    if (version == null) {
-      throw new IllegalArgumentException("version must not be null");
-    }
-
-    if (minGram < 1) {
-      throw new IllegalArgumentException("minGram must be greater than zero");
-    }
-
-    if (minGram > maxGram) {
-      throw new IllegalArgumentException("minGram must not be greater than maxGram");
-    }
-
-    this.minGram = minGram;
-    this.maxGram = maxGram;
-  }
-
-  /** Returns the next token in the stream, or null at EOS. */
-  @Override
-  public boolean incrementToken() throws IOException {
-    clearAttributes();
-    // if we are just starting, read the whole input
-    if (!started) {
-      started = true;
-      gramSize = minGram;
-      char[] chars = new char[Math.min(1024, maxGram)];
-      charsRead = 0;
-      // TODO: refactor to a shared readFully somewhere:
-      boolean exhausted = false;
-      while (charsRead < maxGram) {
-        final int inc = input.read(chars, charsRead, chars.length-charsRead);
-        if (inc == -1) {
-          exhausted = true;
-          break;
-        }
-        charsRead += inc;
-        if (charsRead == chars.length && charsRead < maxGram) {
-          chars = ArrayUtil.grow(chars);
-        }
-      }
-
-      inStr = new String(chars, 0, charsRead);
-
-      if (!exhausted) {
-        // Read extra throwaway chars so that on end() we
-        // report the correct offset:
-        char[] throwaway = new char[1024];
-        while(true) {
-          final int inc = input.read(throwaway, 0, throwaway.length);
-          if (inc == -1) {
-            break;
-          }
-          charsRead += inc;
-        }
-      }
-
-      inLen = inStr.length();
-      if (inLen == 0) {
-        return false;
-      }
-      posIncrAtt.setPositionIncrement(1);
-    } else {
-      posIncrAtt.setPositionIncrement(1);
-    }
-
-    // if the remaining input is too short, we can't generate any n-grams
-    if (gramSize > inLen) {
-      return false;
-    }
-
-    // if we have hit the end of our n-gram size range, quit
-    if (gramSize > maxGram || gramSize > inLen) {
-      return false;
-    }
-
-    // grab gramSize chars from front or back
-    termAtt.setEmpty().append(inStr, 0, gramSize);
-    offsetAtt.setOffset(correctOffset(0), correctOffset(gramSize));
-    gramSize++;
-    return true;
-  }
-  
-  @Override
-  public void end() {
-    // set final offset
-    final int finalOffset = correctOffset(charsRead);
-    this.offsetAtt.setOffset(finalOffset, finalOffset);
-  }    
-
-  @Override
-  public void reset() throws IOException {
-    super.reset();
-    started = false;
-  }
 }
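The refactored tokenizer still emits plain prefixes; a sketch of the expected output for gram sizes 1 to 3, assuming the 4.4 constructor shown above:

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class EdgeNGramDemo {
  public static void main(String[] args) throws Exception {
    EdgeNGramTokenizer tok =
        new EdgeNGramTokenizer(Version.LUCENE_44, new StringReader("foobar"), 1, 3);
    CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
    tok.reset();
    while (tok.incrementToken()) {
      System.out.println(term); // f, fo, foo
    }
    tok.end();
    tok.close();
  }
}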
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
index b499437..09c229b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.util.CharacterUtils;
 import org.apache.lucene.util.Version;
 
 /**
@@ -33,6 +34,7 @@
  * <a name="version"/>
  * <p>You must specify the required {@link Version} compatibility when
  * creating a {@link NGramTokenFilter}. As of Lucene 4.4, this token filters:<ul>
+ * <li>handles supplementary characters correctly,</li>
  * <li>emits all n-grams for the same token at the same position,</li>
  * <li>does not modify offsets,</li>
  * <li>sorts n-grams by their offset in the original token first, then
@@ -42,6 +44,10 @@
  * {@link Version#LUCENE_44} in the constructor but this is not recommended as
  * it will lead to broken {@link TokenStream}s that will cause highlighting
  * bugs.
+ * <p>If you were using this {@link TokenFilter} to perform partial highlighting,
+ * this will no longer work since this filter no longer updates offsets. You should
+ * modify your analysis chain to use {@link NGramTokenizer}, and potentially
+ * override {@link NGramTokenizer#isTokenChar(int)} to perform pre-tokenization.
  */
 public final class NGramTokenFilter extends TokenFilter {
   public static final int DEFAULT_MIN_NGRAM_SIZE = 1;
@@ -51,6 +57,7 @@
 
   private char[] curTermBuffer;
   private int curTermLength;
+  private int curCodePointCount;
   private int curGramSize;
   private int curPos;
   private int curPosInc, curPosLen;
@@ -59,6 +66,7 @@
   private boolean hasIllegalOffsets; // only if the length changed before this filter
 
   private final Version version;
+  private final CharacterUtils charUtils;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final PositionIncrementAttribute posIncAtt;
   private final PositionLengthAttribute posLenAtt;
@@ -75,6 +83,9 @@
   public NGramTokenFilter(Version version, TokenStream input, int minGram, int maxGram) {
     super(new LengthFilter(version, input, minGram, Integer.MAX_VALUE));
     this.version = version;
+    this.charUtils = version.onOrAfter(Version.LUCENE_44)
+        ? CharacterUtils.getInstance(version)
+        : CharacterUtils.getJava4Instance();
     if (minGram < 1) {
       throw new IllegalArgumentException("minGram must be greater than zero");
     }
@@ -126,6 +137,7 @@
         } else {
           curTermBuffer = termAtt.buffer().clone();
           curTermLength = termAtt.length();
+          curCodePointCount = charUtils.codePointCount(termAtt);
           curGramSize = minGram;
           curPos = 0;
           curPosInc = posIncAtt.getPositionIncrement();
@@ -138,13 +150,15 @@
         }
       }
       if (version.onOrAfter(Version.LUCENE_44)) {
-        if (curGramSize > maxGram || curPos + curGramSize > curTermLength) {
+        if (curGramSize > maxGram || (curPos + curGramSize) > curCodePointCount) {
           ++curPos;
           curGramSize = minGram;
         }
-        if (curPos + curGramSize <= curTermLength) {
+        if ((curPos + curGramSize) <= curCodePointCount) {
           clearAttributes();
-          termAtt.copyBuffer(curTermBuffer, curPos, curGramSize);
+          final int start = charUtils.offsetByCodePoints(curTermBuffer, 0, curTermLength, 0, curPos);
+          final int end = charUtils.offsetByCodePoints(curTermBuffer, 0, curTermLength, start, curGramSize);
+          termAtt.copyBuffer(curTermBuffer, start, end - start);
           posIncAtt.setPositionIncrement(curPosInc);
           curPosInc = 0;
           posLenAtt.setPositionLength(curPosLen);
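The two offsetByCodePoints calls above locate an interior gram without splitting surrogate pairs; the same arithmetic, sketched with String.offsetByCodePoints:

public class InteriorGramDemo {
  public static void main(String[] args) {
    String term = "x\uD801\uDC37yz"; // "x𐐷yz": 5 chars, 4 code points
    int curPos = 1, curGramSize = 2; // the 2-gram starting at code point 1
    int start = term.offsetByCodePoints(0, curPos);        // 1
    int end = term.offsetByCodePoints(start, curGramSize); // 4
    System.out.println(term.substring(start, end));        // 𐐷y
  }
}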
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
index a0665bf..646b5e6 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
@@ -25,6 +25,7 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.util.CharacterUtils;
 import org.apache.lucene.util.Version;
 
 /**
@@ -40,29 +41,47 @@
  * <tr><th>Offsets</th><td>[0,2[</td><td>[0,3[</td><td>[1,3[</td><td>[1,4[</td><td>[2,4[</td><td>[2,5[</td><td>[3,5[</td></tr>
  * </table>
  * <a name="version"/>
- * <p>Before Lucene 4.4, this class had a different behavior:<ul>
- * <li>It didn't support more than 1024 chars of input, the rest was trashed.</li>
- * <li>The last whitespaces of the 1024 chars block were trimmed.</li>
- * <li>Tokens were emitted in a different order (by increasing lengths).</li></ul>
- * <p>Although highly discouraged, it is still possible to use the old behavior
- * through {@link Lucene43NGramTokenizer}.
+ * <p>This tokenizer changed a lot in Lucene 4.4 in order to:<ul>
+ * <li>tokenize in a streaming fashion to support streams which are larger
+ * than 1024 chars (limit of the previous version),
+ * <li>count grams based on unicode code points instead of java chars (and
+ * never split in the middle of surrogate pairs),
+ * <li>give the ability to {@link #isTokenChar(int) pre-tokenize} the stream
+ * before computing n-grams.</ul>
+ * <p>Additionally, this class no longer trims trailing whitespace and emits
+ * tokens in a different order: tokens are now emitted by increasing start
+ * offset, whereas they used to be emitted by increasing length (which prevented
+ * support for large input streams).
+ * <p>Although <b style="color:red">highly</b> discouraged, it is still possible
+ * to use the old behavior through {@link Lucene43NGramTokenizer}.
  */
-public final class NGramTokenizer extends Tokenizer {
+// non-final to allow for overriding isTokenChar, but all other methods should be final
+public class NGramTokenizer extends Tokenizer {
   public static final int DEFAULT_MIN_NGRAM_SIZE = 1;
   public static final int DEFAULT_MAX_NGRAM_SIZE = 2;
 
-  private char[] buffer;
-  private int bufferStart, bufferEnd; // remaining slice of the buffer
+  private CharacterUtils charUtils;
+  private CharacterUtils.CharacterBuffer charBuffer;
+  private int[] buffer; // like charBuffer, but converted to code points
+  private int bufferStart, bufferEnd; // remaining slice in buffer
   private int offset;
   private int gramSize;
   private int minGram, maxGram;
   private boolean exhausted;
+  private int lastCheckedChar; // last offset in the buffer that we checked
+  private int lastNonTokenChar; // last offset that we found to not be a token char
+  private boolean edgesOnly; // leading edges n-grams only
 
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
   private final PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
 
+  NGramTokenizer(Version version, Reader input, int minGram, int maxGram, boolean edgesOnly) {
+    super(input);
+    init(version, minGram, maxGram, edgesOnly);
+  }
+
   /**
    * Creates NGramTokenizer with given min and max n-grams.
    * @param version the lucene compatibility <a href="#version">version</a>
@@ -71,8 +90,12 @@
    * @param maxGram the largest n-gram to generate
    */
   public NGramTokenizer(Version version, Reader input, int minGram, int maxGram) {
-    super(input);
-    init(version, minGram, maxGram);
+    this(version, input, minGram, maxGram, false);
+  }
+
+  NGramTokenizer(Version version, AttributeFactory factory, Reader input, int minGram, int maxGram, boolean edgesOnly) {
+    super(factory, input);
+    init(version, minGram, maxGram, edgesOnly);
   }
 
   /**
@@ -84,8 +107,7 @@
    * @param maxGram the largest n-gram to generate
    */
   public NGramTokenizer(Version version, AttributeFactory factory, Reader input, int minGram, int maxGram) {
-    super(factory, input);
-    init(version, minGram, maxGram);
+    this(version, factory, input, minGram, maxGram, false);
   }
 
   /**
@@ -97,10 +119,13 @@
     this(version, input, DEFAULT_MIN_NGRAM_SIZE, DEFAULT_MAX_NGRAM_SIZE);
   }
 
-  private void init(Version version, int minGram, int maxGram) {
-    if (!version.onOrAfter(Version.LUCENE_44)) {
+  private void init(Version version, int minGram, int maxGram, boolean edgesOnly) {
+    if (!edgesOnly && !version.onOrAfter(Version.LUCENE_44)) {
       throw new IllegalArgumentException("This class only works with Lucene 4.4+. To emulate the old (broken) behavior of NGramTokenizer, use Lucene43NGramTokenizer");
     }
+    charUtils = version.onOrAfter(Version.LUCENE_44)
+        ? CharacterUtils.getInstance(version)
+        : CharacterUtils.getJava4Instance();
     if (minGram < 1) {
       throw new IllegalArgumentException("minGram must be greater than zero");
     }
@@ -109,66 +134,107 @@
     }
     this.minGram = minGram;
     this.maxGram = maxGram;
-    buffer = new char[maxGram + 1024];
+    this.edgesOnly = edgesOnly;
+    charBuffer = CharacterUtils.newCharacterBuffer(2 * maxGram + 1024); // 2 * maxGram in case all code points require 2 chars, plus 1024 of extra buffering so we do not keep polling the Reader
+    buffer = new int[charBuffer.getBuffer().length];
+    // Make the term att large enough
+    termAtt.resizeBuffer(2 * maxGram);
   }
 
-  /** Returns the next token in the stream, or null at EOS. */
   @Override
-  public boolean incrementToken() throws IOException {
+  public final boolean incrementToken() throws IOException {
     clearAttributes();
 
-    // compact
-    if (bufferStart >= buffer.length - maxGram) {
-      System.arraycopy(buffer, bufferStart, buffer, 0, bufferEnd - bufferStart);
-      bufferEnd -= bufferStart;
-      bufferStart = 0;
+    // termination of this loop is guaranteed by the fact that every iteration
+    // either advances the buffer (calls consume()) or increases gramSize
+    while (true) {
+      // compact
+      if (bufferStart >= bufferEnd - maxGram - 1 && !exhausted) {
+        System.arraycopy(buffer, bufferStart, buffer, 0, bufferEnd - bufferStart);
+        bufferEnd -= bufferStart;
+        lastCheckedChar -= bufferStart;
+        lastNonTokenChar -= bufferStart;
+        bufferStart = 0;
 
-      // fill in remaining space
-      if (!exhausted) {
-        // TODO: refactor to a shared readFully
-        while (bufferEnd < buffer.length) {
-          final int read = input.read(buffer, bufferEnd, buffer.length - bufferEnd);
-          if (read == -1) {
-            exhausted = true;
-            break;
-          }
-          bufferEnd += read;
+        // fill in remaining space
+        exhausted = !charUtils.fill(charBuffer, input, buffer.length - bufferEnd);
+        // convert to code points
+        bufferEnd += charUtils.toCodePoints(charBuffer.getBuffer(), 0, charBuffer.getLength(), buffer, bufferEnd);
+      }
+
+      // should we go to the next offset?
+      if (gramSize > maxGram || (bufferStart + gramSize) > bufferEnd) {
+        if (bufferStart + 1 + minGram > bufferEnd) {
+          assert exhausted;
+          return false;
+        }
+        consume();
+        gramSize = minGram;
+      }
+
+      updateLastNonTokenChar();
+
+      // retry if the token to be emitted would contain a non-token char
+      final boolean termContainsNonTokenChar = lastNonTokenChar >= bufferStart && lastNonTokenChar < (bufferStart + gramSize);
+      final boolean isEdgeAndPreviousCharIsTokenChar = edgesOnly && lastNonTokenChar != bufferStart - 1;
+      if (termContainsNonTokenChar || isEdgeAndPreviousCharIsTokenChar) {
+        consume();
+        gramSize = minGram;
+        continue;
+      }
+
+      final int length = charUtils.toChars(buffer, bufferStart, gramSize, termAtt.buffer(), 0);
+      termAtt.setLength(length);
+      posIncAtt.setPositionIncrement(1);
+      posLenAtt.setPositionLength(1);
+      offsetAtt.setOffset(correctOffset(offset), correctOffset(offset + length));
+      ++gramSize;
+      return true;
+    }
+  }
+
+  private void updateLastNonTokenChar() {
+    final int termEnd = bufferStart + gramSize - 1;
+    if (termEnd > lastCheckedChar) {
+      for (int i = termEnd; i > lastCheckedChar; --i) {
+        if (!isTokenChar(buffer[i])) {
+          lastNonTokenChar = i;
+          break;
         }
       }
+      lastCheckedChar = termEnd;
     }
+  }
 
-    // should we go to the next offset?
-    if (gramSize > maxGram || bufferStart + gramSize > bufferEnd) {
-      bufferStart++;
-      offset++;
-      gramSize = minGram;
-    }
+  /** Consume one code point. */
+  private void consume() {
+    offset += Character.charCount(buffer[bufferStart++]);
+  }
 
-    // are there enough chars remaining?
-    if (bufferStart + gramSize > bufferEnd) {
-      return false;
-    }
-
-    termAtt.copyBuffer(buffer, bufferStart, gramSize);
-    posIncAtt.setPositionIncrement(1);
-    posLenAtt.setPositionLength(1);
-    offsetAtt.setOffset(correctOffset(offset), correctOffset(offset + gramSize));
-    ++gramSize;
+  /** Only collect characters which satisfy this condition. */
+  protected boolean isTokenChar(int chr) {
     return true;
   }
 
   @Override
-  public void end() {
-    final int endOffset = correctOffset(offset + bufferEnd - bufferStart);
+  public final void end() {
+    assert bufferStart <= bufferEnd;
+    int endOffset = offset;
+    for (int i = bufferStart; i < bufferEnd; ++i) {
+      endOffset += Character.charCount(buffer[i]);
+    }
+    endOffset = correctOffset(endOffset);
     offsetAtt.setOffset(endOffset, endOffset);
   }
 
   @Override
-  public void reset() throws IOException {
+  public final void reset() throws IOException {
     super.reset();
     bufferStart = bufferEnd = buffer.length;
+    lastNonTokenChar = lastCheckedChar = bufferStart - 1;
     offset = 0;
     gramSize = minGram;
     exhausted = false;
+    charBuffer.reset();
   }
 }
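Pre-tokenization is done by overriding isTokenChar; grams then never cross a rejected character. A sketch with an anonymous letter-only subclass:

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class PreTokenizedNGramDemo {
  public static void main(String[] args) throws Exception {
    NGramTokenizer tok = new NGramTokenizer(
        Version.LUCENE_44, new StringReader("ab-cd"), 2, 2) {
      @Override
      protected boolean isTokenChar(int chr) {
        return Character.isLetter(chr); // grams never include '-'
      }
    };
    CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
    tok.reset();
    while (tok.incrementToken()) {
      System.out.println(term); // ab, cd (never "b-" or "-c")
    }
    tok.end();
    tok.close();
  }
}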
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilter.java
index 5dac60f..311c850 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilter.java
@@ -35,12 +35,26 @@
  * </p>
  */
 public final class NorwegianLightStemFilter extends TokenFilter {
-  private final NorwegianLightStemmer stemmer = new NorwegianLightStemmer();
+  private final NorwegianLightStemmer stemmer;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
-
+  
+  /** 
+   * Calls {@link #NorwegianLightStemFilter(TokenStream, int) 
+   * NorwegianLightStemFilter(input, BOKMAAL)}
+   */
   public NorwegianLightStemFilter(TokenStream input) {
+    this(input, NorwegianLightStemmer.BOKMAAL);
+  }
+  
+  /** 
+   * Creates a new NorwegianLightStemFilter
+   * @param flags set to {@link NorwegianLightStemmer#BOKMAAL}, 
+   *                     {@link NorwegianLightStemmer#NYNORSK}, or both.
+   */
+  public NorwegianLightStemFilter(TokenStream input, int flags) {
     super(input);
+    stemmer = new NorwegianLightStemmer(flags);
   }
   
   @Override
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java
index 98af94e..3446825 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java
@@ -23,6 +23,9 @@
 import org.apache.lucene.analysis.no.NorwegianLightStemFilter;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.BOKMAAL;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.NYNORSK;
+
 /** 
  * Factory for {@link NorwegianLightStemFilter}.
  * <pre class="prettyprint">
@@ -30,15 +33,27 @@
  *   &lt;analyzer&gt;
  *     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
  *     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
- *     &lt;filter class="solr.NorwegianLightStemFilterFactory"/&gt;
+ *     &lt;filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  */
 public class NorwegianLightStemFilterFactory extends TokenFilterFactory {
   
+  private final int flags;
+  
   /** Creates a new NorwegianLightStemFilterFactory */
   public NorwegianLightStemFilterFactory(Map<String,String> args) {
     super(args);
+    String variant = get(args, "variant");
+    if (variant == null || "nb".equals(variant)) {
+      flags = BOKMAAL;
+    } else if ("nn".equals(variant)) {
+      flags = NYNORSK;
+    } else if ("no".equals(variant)) {
+      flags = BOKMAAL | NYNORSK;
+    } else {
+      throw new IllegalArgumentException("invalid variant: " + variant);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -46,6 +61,6 @@
   
   @Override
   public TokenStream create(TokenStream input) {
-    return new NorwegianLightStemFilter(input);
+    return new NorwegianLightStemFilter(input, flags);
   }
 }
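A sketch of the variant parameter from plain Java, assuming the factory may be constructed directly with a mutable args map (the constructor consumes the keys it recognizes):

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.no.NorwegianLightStemFilterFactory;

public class NorwegianVariantDemo {
  public static void main(String[] args) {
    Map<String,String> params = new HashMap<String,String>();
    params.put("variant", "no"); // "nb" = Bokmål only, "nn" = Nynorsk only, "no" = both
    NorwegianLightStemFilterFactory factory = new NorwegianLightStemFilterFactory(params);
    // factory.create(tokenStream) now removes both Bokmål and Nynorsk endings
    System.out.println(factory.getClass().getSimpleName() + " configured");
  }
}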
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
index 494283e..d18de32 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemmer.java
@@ -62,50 +62,106 @@
  * corpus to validate against whereas the Norwegian one is hand crafted.
  */
 public class NorwegianLightStemmer {
+  /** Constant to remove Bokmål-specific endings */
+  public static final int BOKMAAL = 1;
+  /** Constant to remove Nynorsk-specific endings */
+  public static final int NYNORSK = 2;
   
+  final boolean useBokmaal;
+  final boolean useNynorsk;
+  
+  /** 
+   * Creates a new NorwegianLightStemmer
+   * @param flags set to {@link #BOKMAAL}, {@link #NYNORSK}, or both.
+   */
+  public NorwegianLightStemmer(int flags) {
+    if (flags <= 0 || flags > BOKMAAL + NYNORSK) {
+      throw new IllegalArgumentException("invalid flags");
+    }
+    useBokmaal = (flags & BOKMAAL) != 0;
+    useNynorsk = (flags & NYNORSK) != 0;
+  }
+      
   public int stem(char s[], int len) {   
     // Remove posessive -s (bilens -> bilen) and continue checking 
     if (len > 4 && s[len-1] == 's')
       len--;
 
     // Remove common endings, single-pass
-    if (len > 7 && 
-        (endsWith(s, len, "heter") ||  // general ending (hemmelig-heter -> hemmelig)
-         endsWith(s, len, "heten")))   // general ending (hemmelig-heten -> hemmelig)
+    if (len > 7 &&
+        ((endsWith(s, len, "heter") &&
+          useBokmaal) ||  // general ending (hemmelig-heter -> hemmelig)
+         (endsWith(s, len, "heten") &&
+          useBokmaal) ||  // general ending (hemmelig-heten -> hemmelig)
+         (endsWith(s, len, "heita") &&
+          useNynorsk)))   // general ending (hemmeleg-heita -> hemmeleg)
       return len - 5;
+    
+    // Remove Nynorsk common endings, single-pass
+    if (len > 8 && useNynorsk &&
+        (endsWith(s, len, "heiter") ||  // general ending (hemmeleg-heiter -> hemmeleg)
+         endsWith(s, len, "leiken") ||  // general ending (trygg-leiken -> trygg)
+         endsWith(s, len, "leikar")))   // general ending (trygg-leikar -> trygg)
+      return len - 6;
 
     if (len > 5 &&
-        (endsWith(s, len, "dom") || // general ending (kristen-dom -> kristen)
-         endsWith(s, len, "het")))  // general ending (hemmelig-het -> hemmelig)
+        (endsWith(s, len, "dom") ||  // general ending (kristen-dom -> kristen)
+         (endsWith(s, len, "het") &&
+          useBokmaal)))              // general ending (hemmelig-het -> hemmelig)
       return len - 3;
     
+    if (len > 6 && useNynorsk &&
+        (endsWith(s, len, "heit") ||  // general ending (hemmeleg-heit -> hemmeleg)
+         endsWith(s, len, "semd") ||  // general ending (verk-semd -> verk)
+         endsWith(s, len, "leik")))   // general ending (trygg-leik -> trygg)
+      return len - 4;
+    
     if (len > 7 && 
         (endsWith(s, len, "elser") ||   // general ending (føl-elser -> føl)
          endsWith(s, len, "elsen")))    // general ending (føl-elsen -> føl)
       return len - 5;
     
     if (len > 6 &&
-        (endsWith(s, len, "ende") ||  // (sov-ende -> sov)
+        ((endsWith(s, len, "ende") &&
+          useBokmaal) ||      // (sov-ende -> sov)
+         (endsWith(s, len, "ande") &&
+          useNynorsk) ||      // (sov-ande -> sov)
          endsWith(s, len, "else") ||  // general ending (føl-else -> føl)
-         endsWith(s, len, "este") ||  // adj (fin-este -> fin)
-         endsWith(s, len, "eren")))   // masc
+         (endsWith(s, len, "este") &&
+          useBokmaal) ||      // adj (fin-este -> fin)
+         (endsWith(s, len, "aste") &&
+          useNynorsk) ||      // adj (fin-aste -> fin)
+         (endsWith(s, len, "eren") &&
+          useBokmaal) ||      // masc
+         (endsWith(s, len, "aren") &&
+          useNynorsk)))       // masc 
       return len - 4;
     
     if (len > 5 &&
-        (endsWith(s, len, "ere") || // adj (fin-ere -> fin)
-         endsWith(s, len, "est") || // adj (fin-est -> fin)
-         endsWith(s, len, "ene")    // masc/fem/neutr pl definite (hus-ene)
-         )) 
+        ((endsWith(s, len, "ere") &&
+         useBokmaal) ||     // adj (fin-ere -> fin)
+         (endsWith(s, len, "are") &&
+          useNynorsk) ||    // adj (fin-are -> fin)
+         (endsWith(s, len, "est") &&
+          useBokmaal) ||    // adj (fin-est -> fin)
+         (endsWith(s, len, "ast") &&
+          useNynorsk) ||    // adj (fin-ast -> fin)
+         endsWith(s, len, "ene") || // masc/fem/neutr pl definite (hus-ene)
+         (endsWith(s, len, "ane") &&
+          useNynorsk)))     // masc pl definite (gut-ane)
       return len - 3;
     
     if (len > 4 &&
         (endsWith(s, len, "er") ||  // masc/fem indefinite
          endsWith(s, len, "en") ||  // masc/fem definite
          endsWith(s, len, "et") ||  // neutr definite
-         endsWith(s, len, "st") ||  // adj (billig-st -> billig)
+         (endsWith(s, len, "ar") &&
+          useNynorsk) ||    // masc pl indefinite
+         (endsWith(s, len, "st") &&
+          useBokmaal) ||    // adj (billig-st -> billig)
          endsWith(s, len, "te")))
       return len - 2;
-    
+
     if (len > 3)
       switch(s[len-1]) {
         case 'a':     // fem definite
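The flags form a bitmask, so both variants can be enabled at once; a small sketch stemming a Nynorsk plural in place:

import org.apache.lucene.analysis.no.NorwegianLightStemmer;

public class NynorskStemDemo {
  public static void main(String[] args) {
    NorwegianLightStemmer stemmer = new NorwegianLightStemmer(
        NorwegianLightStemmer.BOKMAAL | NorwegianLightStemmer.NYNORSK);
    char[] buf = "gutane".toCharArray(); // Nynorsk masc pl definite
    int len = stemmer.stem(buf, buf.length); // stems in place, returns new length
    System.out.println(new String(buf, 0, len)); // gut
  }
}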
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilter.java
index f06ca0f..9d4fcfb 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilter.java
@@ -35,12 +35,26 @@
  * </p>
  */
 public final class NorwegianMinimalStemFilter extends TokenFilter {
-  private final NorwegianMinimalStemmer stemmer = new NorwegianMinimalStemmer();
+  private final NorwegianMinimalStemmer stemmer;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
 
+  /** 
+   * Calls {@link #NorwegianMinimalStemFilter(TokenStream, int) 
+   * NorwegianMinimalStemFilter(input, BOKMAAL)}
+   */
   public NorwegianMinimalStemFilter(TokenStream input) {
+    this(input, NorwegianLightStemmer.BOKMAAL);
+  }
+  
+  /** 
+   * Creates a new NorwegianMinimalStemFilter
+   * @param flags set to {@link NorwegianLightStemmer#BOKMAAL}, 
+   *                     {@link NorwegianLightStemmer#NYNORSK}, or both.
+   */
+  public NorwegianMinimalStemFilter(TokenStream input, int flags) {
     super(input);
+    this.stemmer = new NorwegianMinimalStemmer(flags);
   }
   
   @Override
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java
index eb2f8e1..14a06a7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java
@@ -23,6 +23,9 @@
 import org.apache.lucene.analysis.no.NorwegianMinimalStemFilter;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.BOKMAAL;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.NYNORSK;
+
 /** 
  * Factory for {@link NorwegianMinimalStemFilter}.
  * <pre class="prettyprint">
@@ -30,15 +33,27 @@
  *   &lt;analyzer&gt;
  *     &lt;tokenizer class="solr.StandardTokenizerFactory"/&gt;
  *     &lt;filter class="solr.LowerCaseFilterFactory"/&gt;
- *     &lt;filter class="solr.NorwegianMinimalStemFilterFactory"/&gt;
+ *     &lt;filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  */
 public class NorwegianMinimalStemFilterFactory extends TokenFilterFactory {
   
+  private final int flags;
+  
   /** Creates a new NorwegianMinimalStemFilterFactory */
   public NorwegianMinimalStemFilterFactory(Map<String,String> args) {
     super(args);
+    String variant = get(args, "variant");
+    if (variant == null || "nb".equals(variant)) {
+      flags = BOKMAAL;
+    } else if ("nn".equals(variant)) {
+      flags = NYNORSK;
+    } else if ("no".equals(variant)) {
+      flags = BOKMAAL | NYNORSK;
+    } else {
+      throw new IllegalArgumentException("invalid variant: " + variant);
+    }
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -46,6 +61,6 @@
   
   @Override
   public TokenStream create(TokenStream input) {
-    return new NorwegianMinimalStemFilter(input);
+    return new NorwegianMinimalStemFilter(input, flags);
   }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
index 03ce57b..bc08548 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemmer.java
@@ -53,31 +53,52 @@
  */
 
 import static org.apache.lucene.analysis.util.StemmerUtil.*;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.BOKMAAL;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.NYNORSK;
 
 /**
- * Minimal Stemmer for Norwegian bokmål (no-nb)
+ * Minimal Stemmer for Norwegian Bokmål (no-nb) and Nynorsk (no-nn)
  * <p>
  * Stems known plural forms for Norwegian nouns only, together with genitiv -s
  */
 public class NorwegianMinimalStemmer {
+  final boolean useBokmaal;
+  final boolean useNynorsk;
   
-  public int stem(char s[], int len) {       
+  /** 
+   * Creates a new NorwegianMinimalStemmer
+   * @param flags set to {@link NorwegianLightStemmer#BOKMAAL}, 
+   *                     {@link NorwegianLightStemmer#NYNORSK}, or both.
+   */
+  public NorwegianMinimalStemmer(int flags) {
+    if (flags <= 0 || flags > BOKMAAL + NYNORSK) {
+      throw new IllegalArgumentException("invalid flags");
+    }
+    useBokmaal = (flags & BOKMAAL) != 0;
+    useNynorsk = (flags & NYNORSK) != 0;
+  }
+
+  public int stem(char s[], int len) { 
     // Remove genitiv s
     if (len > 4 && s[len-1] == 's')
       len--;
     
     if (len > 5 &&
-         endsWith(s, len, "ene")    // masc/fem/neutr pl definite (hus-ene)
-        )
+         (endsWith(s, len, "ene") ||  // masc/fem/neutr pl definite (hus-ene)
+          (endsWith(s, len, "ane") &&
+           useNynorsk                 // masc pl definite (gut-ane)
+        )))
       return len - 3;
-    
+
     if (len > 4 &&
-        (endsWith(s, len, "er") ||  // masc/fem indefinite
-         endsWith(s, len, "en") ||  // masc/fem definite
-         endsWith(s, len, "et")     // neutr definite
-        ))
+        (endsWith(s, len, "er") ||   // masc/fem indefinite
+         endsWith(s, len, "en") ||   // masc/fem definite
+         endsWith(s, len, "et") ||   // neutr definite
+         (endsWith(s, len, "ar") &&
+          useNynorsk                 // masc pl indefinite
+        )))
       return len - 2;
-    
+
     if (len > 3)
       switch(s[len-1]) {
         case 'a':     // fem definite
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
index e26b363..3578bdf 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
@@ -57,7 +57,7 @@
       final char[] buffer = termAtt.buffer();
       int length = termAtt.length();
       for (int i = 0; i < length;) {
-        final int ch = Character.codePointAt(buffer, i);
+        final int ch = Character.codePointAt(buffer, i, length);
     
         iOrAfter = (ch == LATIN_CAPITAL_LETTER_I || 
             (iOrAfter && Character.getType(ch) == Character.NON_SPACING_MARK));
@@ -100,7 +100,7 @@
    */
   private boolean isBeforeDot(char s[], int pos, int len) {
     for (int i = pos; i < len;) {
-      final int ch = Character.codePointAt(s, i);
+      final int ch = Character.codePointAt(s, i, len);
       if (Character.getType(ch) != Character.NON_SPACING_MARK)
         return false;
       if (ch == COMBINING_DOT_ABOVE)
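The extra limit argument matters because term buffers are reused, so chars beyond the logical term length are stale; without the limit, a trailing high surrogate would pair with such a stale char:

public class CodePointLimitDemo {
  public static void main(String[] args) {
    char[] buf = { '\uD801', '\uDC37' }; // index 1 is stale, beyond the term
    int length = 1; // the logical term is only the high surrogate at index 0
    System.out.println(Character.codePointAt(buf, 0));         // 66615: pairs with the stale char
    System.out.println(Character.codePointAt(buf, 0, length)); // 55297: bounded by the limit
  }
}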
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
index 8e759fa..442bf92 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
@@ -262,7 +262,7 @@
     if (ignoreCase) {
       for(int i=0;i<len;) {
         final int codePointAt = charUtils.codePointAt(text1, off+i, limit);
-        if (Character.toLowerCase(codePointAt) != charUtils.codePointAt(text2, i))
+        if (Character.toLowerCase(codePointAt) != charUtils.codePointAt(text2, i, text2.length))
           return false;
         i += Character.charCount(codePointAt); 
       }
@@ -282,7 +282,7 @@
     if (ignoreCase) {
       for(int i=0;i<len;) {
         final int codePointAt = charUtils.codePointAt(text1, i);
-        if (Character.toLowerCase(codePointAt) != charUtils.codePointAt(text2, i))
+        if (Character.toLowerCase(codePointAt) != charUtils.codePointAt(text2, i, text2.length))
           return false;
         i += Character.charCount(codePointAt);
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
index 4d7693d..38b6e62 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharTokenizer.java
@@ -100,7 +100,8 @@
     while (true) {
       if (bufferIndex >= dataLen) {
         offset += dataLen;
-        if(!charUtils.fill(ioBuffer, input)) { // read supplementary char aware with CharacterUtils
+        charUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils
+        if (ioBuffer.getLength() == 0) {
           dataLen = 0; // so next offset += dataLen won't decrement offset
           if (length > 0) {
             break;
@@ -113,7 +114,7 @@
         bufferIndex = 0;
       }
       // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
-      final int c = charUtils.codePointAt(ioBuffer.getBuffer(), bufferIndex);
+      final int c = charUtils.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength());
       final int charCount = Character.charCount(c);
       bufferIndex += charCount;
 
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharacterUtils.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharacterUtils.java
index 3a864b3..7c3ec4d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharacterUtils.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharacterUtils.java
@@ -52,27 +52,6 @@
   }
 
   /**
-   * Returns the code point at the given index of the char array.
-   * Depending on the {@link Version} passed to
-   * {@link CharacterUtils#getInstance(Version)} this method mimics the behavior
-   * of {@link Character#codePointAt(char[], int)} as it would have been
-   * available on a Java 1.4 JVM or on a later virtual machine version.
-   * 
-   * @param chars
-   *          a character array
-   * @param offset
-   *          the offset to the char values in the chars array to be converted
-   * 
-   * @return the Unicode code point at the given index
-   * @throws NullPointerException
-   *           - if the array is null.
-   * @throws IndexOutOfBoundsException
-   *           - if the value offset is negative or not less than the length of
-   *           the char array.
-   */
-  public abstract int codePointAt(final char[] chars, final int offset);
-
-  /**
    * Returns the code point at the given index of the {@link CharSequence}.
    * Depending on the {@link Version} passed to
    * {@link CharacterUtils#getInstance(Version)} this method mimics the behavior
@@ -116,7 +95,10 @@
    *           the char array.
    */
   public abstract int codePointAt(final char[] chars, final int offset, final int limit);
-  
+
+  /** Return the number of code points in <code>seq</code>. */
+  public abstract int codePointCount(CharSequence seq);
+
   /**
    * Creates a new {@link CharacterBuffer} and allocates a <code>char[]</code>
    * of the given bufferSize.
@@ -140,54 +122,108 @@
    * @param offset the offset to start at
    * @param limit the max char in the buffer to lower case
    */
-  public void toLowerCase(final char[] buffer, final int offset, final int limit) {
+  public final void toLowerCase(final char[] buffer, final int offset, final int limit) {
     assert buffer.length >= limit;
     assert offset <=0 && offset <= buffer.length;
     for (int i = offset; i < limit;) {
       i += Character.toChars(
               Character.toLowerCase(
-                  codePointAt(buffer, i)), buffer, i);
+                  codePointAt(buffer, i, limit)), buffer, i);
      }
   }
-  
+
+  /** Converts a sequence of Java characters to a sequence of unicode code points.
+   *  @return the number of code points written to the destination buffer */
+  public final int toCodePoints(char[] src, int srcOff, int srcLen, int[] dest, int destOff) {
+    if (srcLen < 0) {
+      throw new IllegalArgumentException("srcLen must be >= 0");
+    }
+    int codePointCount = 0;
+    for (int i = 0; i < srcLen; ) {
+      final int cp = codePointAt(src, srcOff + i, srcOff + srcLen);
+      final int charCount = Character.charCount(cp);
+      dest[destOff + codePointCount++] = cp;
+      i += charCount;
+    }
+    return codePointCount;
+  }
+
+  /** Converts a sequence of unicode code points to a sequence of Java characters.
+   *  @return the number of chars written to the destination buffer */
+  public final int toChars(int[] src, int srcOff, int srcLen, char[] dest, int destOff) {
+    if (srcLen < 0) {
+      throw new IllegalArgumentException("srcLen must be >= 0");
+    }
+    int written = 0;
+    for (int i = 0; i < srcLen; ++i) {
+      written += Character.toChars(src[srcOff + i], dest, destOff + written);
+    }
+    return written;
+  }
+
   /**
    * Fills the {@link CharacterBuffer} with characters read from the given
-   * reader {@link Reader}. This method tries to read as many characters into
-   * the {@link CharacterBuffer} as possible, each call to fill will start
-   * filling the buffer from offset <code>0</code> up to the length of the size
-   * of the internal character array.
+   * reader {@link Reader}. This method tries to read <code>numChars</code>
+   * characters into the {@link CharacterBuffer}; each call to fill will start
+   * filling the buffer from offset <code>0</code> up to <code>numChars</code>.
+   * Because code points may span 2 java characters, this method may fill
+   * only <code>numChars - 1</code> characters in order not to split in
+   * the middle of a surrogate pair, even if there are remaining characters in
+   * the {@link Reader}.
    * <p>
    * Depending on the {@link Version} passed to
    * {@link CharacterUtils#getInstance(Version)} this method implements
    * supplementary character awareness when filling the given buffer. For all
-   * {@link Version} &gt; 3.0 {@link #fill(CharacterBuffer, Reader)} guarantees
+   * {@link Version} &gt; 3.0 {@link #fill(CharacterBuffer, Reader, int)} guarantees
    * that the given {@link CharacterBuffer} will never contain a high surrogate
    * character as the last element in the buffer unless it is the last available
    * character in the reader. In other words, high and low surrogate pairs will
   * always be preserved across buffer borders.
    * </p>
+   * <p>
+   * A return value of <code>false</code> means that this method call exhausted
+   * the reader, but some characters may still have been read into the buffer; this can be
+   * verified by checking whether <code>buffer.getLength() &gt; 0</code>.
+   * </p>
    * 
    * @param buffer
    *          the buffer to fill.
    * @param reader
    *          the reader to read characters from.
-   * @return <code>true</code> if and only if no more characters are available
-   *         in the reader, otherwise <code>false</code>.
+   * @param numChars
+   *          the number of chars to read
+   * @return <code>false</code> if and only if <code>reader.read</code> returned <code>-1</code> while trying to fill the buffer
    * @throws IOException
    *           if the reader throws an {@link IOException}.
    */
-  public abstract boolean fill(CharacterBuffer buffer, Reader reader) throws IOException;
+  public abstract boolean fill(CharacterBuffer buffer, Reader reader, int numChars) throws IOException;
+
+  /** Convenience method which calls <code>fill(buffer, reader, buffer.buffer.length)</code>. */
+  public final boolean fill(CharacterBuffer buffer, Reader reader) throws IOException {
+    return fill(buffer, reader, buffer.buffer.length);
+  }
+
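
A sketch of the intended consumption loop. Note that a false return can still leave characters in the buffer, so the buffer is drained once more before the loop exits. The static factory CharacterUtils.newCharacterBuffer and the CharacterBuffer getters are assumed from the surrounding API; the class name is illustrative.

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.util.CharacterUtils;
import org.apache.lucene.analysis.util.CharacterUtils.CharacterBuffer;
import org.apache.lucene.util.Version;

public class FillLoopSketch {
  public static void main(String[] args) throws IOException {
    CharacterUtils utils = CharacterUtils.getInstance(Version.LUCENE_44);
    CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(8); // assumed factory, see javadoc above
    Reader reader = new StringReader("some input that spans more than one buffer");
    boolean hasMore = true;
    while (hasMore) {
      hasMore = utils.fill(buffer, reader); // false once reader.read returned -1
      final int end = buffer.getOffset() + buffer.getLength();
      for (int i = buffer.getOffset(); i < end; ) {
        // walking code point by code point never splits a surrogate pair,
        // because fill never ends the buffer on a dangling high surrogate
        i += Character.charCount(utils.codePointAt(buffer.getBuffer(), i, end));
      }
    }
  }
}
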
+  /** Return the index within <code>buf[start:start+count]</code> that is
+   *  <code>offset</code> code points away from <code>index</code>. */
+  public abstract int offsetByCodePoints(char[] buf, int start, int count, int index, int offset);
+
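
For instance, stepping one code point forward over a surrogate pair advances the char index by two (class name illustrative):

import org.apache.lucene.analysis.util.CharacterUtils;
import org.apache.lucene.util.Version;

public class OffsetByCodePointsSketch {
  public static void main(String[] args) {
    CharacterUtils utils = CharacterUtils.getInstance(Version.LUCENE_44);
    char[] buf = "a\ud801\udc1cb".toCharArray(); // code points: 'a', U+1041C, 'b'
    // one code point forward from index 1 skips both halves of the pair
    System.out.println(utils.offsetByCodePoints(buf, 0, buf.length, 1, 1)); // 3, not 2
  }
}
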
+  static int readFully(Reader reader, char[] dest, int offset, int len) throws IOException {
+    int read = 0;
+    while (read < len) {
+      final int r = reader.read(dest, offset + read, len - read);
+      if (r == -1) {
+        break;
+      }
+      read += r;
+    }
+    return read;
+  }
 
   private static final class Java5CharacterUtils extends CharacterUtils {
     Java5CharacterUtils() {
     }
 
     @Override
-    public int codePointAt(final char[] chars, final int offset) {
-      return Character.codePointAt(chars, offset);
-    }
-
-    @Override
     public int codePointAt(final CharSequence seq, final int offset) {
       return Character.codePointAt(seq, offset);
     }
@@ -198,7 +234,11 @@
     }
 
     @Override
-    public boolean fill(final CharacterBuffer buffer, final Reader reader) throws IOException {
+    public boolean fill(final CharacterBuffer buffer, final Reader reader, int numChars) throws IOException {
+      assert buffer.buffer.length >= 2;
+      if (numChars < 2 || numChars > buffer.buffer.length) {
+        throw new IllegalArgumentException("numChars must be >= 2 and <= the buffer size");
+      }
       final char[] charBuffer = buffer.buffer;
       buffer.offset = 0;
       final int offset;
@@ -206,47 +246,36 @@
       // Install the previously saved ending high surrogate:
       if (buffer.lastTrailingHighSurrogate != 0) {
         charBuffer[0] = buffer.lastTrailingHighSurrogate;
+        buffer.lastTrailingHighSurrogate = 0;
         offset = 1;
       } else {
         offset = 0;
       }
 
-      final int read = reader.read(charBuffer,
-                                   offset,
-                                   charBuffer.length - offset);
-      if (read == -1) {
-        buffer.length = offset;
-        buffer.lastTrailingHighSurrogate = 0;
-        return offset != 0;
-      }
-      assert read > 0;
-      buffer.length = read + offset;
+      final int read = readFully(reader, charBuffer, offset, numChars - offset);
 
-      // If we read only a single char, and that char was a
-      // high surrogate, read again:
-      if (buffer.length == 1
-          && Character.isHighSurrogate(charBuffer[buffer.length - 1])) {
-        final int read2 = reader.read(charBuffer,
-                                      1,
-                                      charBuffer.length - 1);
-        if (read2 == -1) {
-          // NOTE: mal-formed input (ended on a high
-          // surrogate)!  Consumer must deal with it...
-          return true;
-        }
-        assert read2 > 0;
-
-        buffer.length += read2;
+      buffer.length = offset + read;
+      final boolean result = buffer.length == numChars;
+      if (buffer.length < numChars) {
+        // We failed to fill the buffer. Even if the last char is a high
+        // surrogate, there is nothing we can do
+        return result;
       }
 
-      if (buffer.length > 1
-          && Character.isHighSurrogate(charBuffer[buffer.length - 1])) {
+      if (Character.isHighSurrogate(charBuffer[buffer.length - 1])) {
         buffer.lastTrailingHighSurrogate = charBuffer[--buffer.length];
-      } else {
-        buffer.lastTrailingHighSurrogate = 0;
       }
+      return result;
+    }
 
-      return true;
+    @Override
+    public int codePointCount(CharSequence seq) {
+      return Character.codePointCount(seq, 0, seq.length());
+    }
+
+    @Override
+    public int offsetByCodePoints(char[] buf, int start, int count, int index, int offset) {
+      return Character.offsetByCodePoints(buf, start, count, index, offset);
     }
   }
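
A sketch of the lastTrailingHighSurrogate hand-off performed above: a high surrogate at the end of a full buffer is withheld and re-installed at offset 0 on the next call. Class name illustrative; newCharacterBuffer assumed as before.

import java.io.StringReader;
import org.apache.lucene.analysis.util.CharacterUtils;
import org.apache.lucene.analysis.util.CharacterUtils.CharacterBuffer;
import org.apache.lucene.util.Version;

public class SurrogateBoundarySketch {
  public static void main(String[] args) throws Exception {
    CharacterUtils java5 = CharacterUtils.getInstance(Version.LUCENE_44);
    CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(4); // assumed factory
    // the 4th char is a high surrogate; filling all 4 would split the pair
    StringReader reader = new StringReader("abc\ud801\udc1c");
    java5.fill(buffer, reader);
    System.out.println(buffer.getLength()); // 3: the high surrogate is held back
    java5.fill(buffer, reader);
    System.out.println(buffer.getLength()); // 2: the pair arrives intact
  }
}
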
 
@@ -255,11 +284,6 @@
     }
 
     @Override
-    public int codePointAt(final char[] chars, final int offset) {
-      return chars[offset];
-    }
-
-    @Override
     public int codePointAt(final CharSequence seq, final int offset) {
       return seq.charAt(offset);
     }
@@ -272,13 +296,31 @@
     }
 
     @Override
-    public boolean fill(final CharacterBuffer buffer, final Reader reader) throws IOException {
+    public boolean fill(CharacterBuffer buffer, Reader reader, int numChars)
+        throws IOException {
+      assert buffer.buffer.length >= 1;
+      if (numChars < 1 || numChars > buffer.buffer.length) {
+        throw new IllegalArgumentException("numChars must be >= 1 and <= the buffer size");
+      }
       buffer.offset = 0;
-      final int read = reader.read(buffer.buffer);
-      if(read == -1)
-        return false;
+      final int read = readFully(reader, buffer.buffer, 0, numChars);
       buffer.length = read;
-      return true;
+      buffer.lastTrailingHighSurrogate = 0;
+      return read == numChars;
+    }
+
+    @Override
+    public int codePointCount(CharSequence seq) {
+      return seq.length();
+    }
+
+    @Override
+    public int offsetByCodePoints(char[] buf, int start, int count, int index, int offset) {
+      final int result = index + offset;
+      if (result < 0 || result > count) {
+        throw new IndexOutOfBoundsException();
+      }
+      return result;
     }
 
   }
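
The practical difference between the two implementations in one line each (class name illustrative):

import org.apache.lucene.analysis.util.CharacterUtils;
import org.apache.lucene.util.Version;

public class MatchVersionSketch {
  public static void main(String[] args) {
    String pair = "\ud801\udc1c"; // one supplementary code point, two UTF-16 units
    // pre-3.1 semantics: each UTF-16 unit counts as its own "code point"
    System.out.println(CharacterUtils.getJava4Instance().codePointAt(pair, 0)); // prints 55297 (0xD801)
    // supplementary-aware semantics: the pair decodes to a single code point
    System.out.println(CharacterUtils.getInstance(Version.LUCENE_44).codePointAt(pair, 0)); // prints 66588 (0x1041C)
  }
}
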
diff --git a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
index 3497e30..a01b45f 100644
--- a/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
+++ b/lucene/analysis/common/src/resources/META-INF/services/org.apache.lucene.analysis.util.TokenFilterFactory
@@ -66,6 +66,8 @@
 org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilterFactory
 org.apache.lucene.analysis.miscellaneous.TrimFilterFactory
 org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory
+org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilterFactory
+org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilterFactory
 org.apache.lucene.analysis.ngram.EdgeNGramFilterFactory
 org.apache.lucene.analysis.ngram.NGramFilterFactory
 org.apache.lucene.analysis.no.NorwegianLightStemFilterFactory
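
These two service entries make the new factories loadable by name ("ScandinavianFolding" and "ScandinavianNormalization"), which the factory tests below exercise. Programmatically the filters wrap any TokenStream; a minimal sketch with the standard consumer workflow (class name illustrative, expected token taken from the tests below):

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class FoldingSketch {
  public static void main(String[] args) throws Exception {
    TokenStream ts = new ScandinavianFoldingFilter(
        new KeywordTokenizer(new StringReader("räksmörgås")));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // "raksmorgas", per the tests below
    }
    ts.end();
    ts.close();
  }
}
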
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
index ce50297..0112dde 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
@@ -170,8 +170,6 @@
     char[] termBuffer = filter.getAttribute(CharTermAttribute.class).buffer();
     int length = highSurEndingLower.length();
     assertEquals('\ud801', termBuffer[length - 1]);
-    assertEquals('\udc3e', termBuffer[length]);
-    
   }
   
   public void testLowerCaseTokenizer() throws IOException {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellDictionaryTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellDictionaryTest.java
index c0b207d..fd8f921 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellDictionaryTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellDictionaryTest.java
@@ -20,7 +20,9 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.text.ParseException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Assert;
@@ -131,12 +133,30 @@
     assertEquals(3, dictionary.lookupSuffix(new char[]{'e'}, 0, 1).size());
     assertEquals(1, dictionary.lookupPrefix(new char[]{'s'}, 0, 1).size());
     assertEquals(1, dictionary.lookupWord(new char[]{'o', 'l', 'r'}, 0, 3).size());
-    
+    assertEquals("Wrong number of flags for lucen", 1, dictionary.lookupWord(new char[]{'l', 'u', 'c', 'e', 'n'}, 0, 5).get(0).getFlags().length);
+
     affixStream.close();
     dictStream.close();
   }
 
   @Test
+  public void testHunspellDictionary_multipleDictWithOverride() throws IOException, ParseException {
+    InputStream affixStream = getClass().getResourceAsStream("test.aff");
+    List<InputStream> dictStreams = new ArrayList<InputStream>();
+    dictStreams.add(getClass().getResourceAsStream("test.dic"));
+    dictStreams.add(getClass().getResourceAsStream("testOverride.dic"));
+
+    HunspellDictionary dictionary = new HunspellDictionary(affixStream, dictStreams, TEST_VERSION_CURRENT, false);
+    assertEquals("Wrong number of flags for lucen", 3, dictionary.lookupWord(new char[]{'l', 'u', 'c', 'e', 'n'}, 0, 5).get(0).getFlags().length);
+    assertEquals("Wrong number of flags for bar", 1, dictionary.lookupWord(new char[]{'b', 'a', 'r'}, 0, 3).get(0).getFlags().length);
+
+    affixStream.close();
+    for(InputStream dstream : dictStreams) {
+      dstream.close();
+    }
+  }
+
+  @Test
   public void testCompressedHunspellDictionary_loadDicAff() throws IOException, ParseException {
     InputStream affixStream = getClass().getResourceAsStream("testCompressed.aff");
     InputStream dictStream = getClass().getResourceAsStream("testCompressed.dic");
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemmerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemmerTest.java
index 82a9b84..66a9410 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemmerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemmerTest.java
@@ -94,17 +94,21 @@
     assertEquals(1, stems.size());
     assertEquals("foo", stems.get(0).getStemString());
     
-    stems = stemmer.stem("food");
+    stems = stemmer.stem("mood");
     assertEquals(1, stems.size());
-    assertEquals("foo", stems.get(0).getStemString());
+    assertEquals("moo", stems.get(0).getStemString());
     
     stems = stemmer.stem("Foos");
     assertEquals(1, stems.size());
     assertEquals("foo", stems.get(0).getStemString());
-    
+
+    // The "Foo" rule gets overridden by the "foo" rule, and we don't merge
     stems = stemmer.stem("Food");
+    assertEquals(0, stems.size());
+
+    stems = stemmer.stem("Mood");
     assertEquals(1, stems.size());
-    assertEquals("foo", stems.get(0).getStemString());
+    assertEquals("moo", stems.get(0).getStemString());
   }
 
   @Test
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/test.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/test.dic
index 1a9dc07..12efd8f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/test.dic
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/test.dic
@@ -1,9 +1,10 @@
-6
+9
 lucen/A
 lucene
 mahout/A
 olr/B
 ab/C
 Apach/A
+Foo/E
 foo/D
-Foo/E
\ No newline at end of file
+Moo/E
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/testOverride.dic b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/testOverride.dic
new file mode 100644
index 0000000..c1111ef
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/testOverride.dic
@@ -0,0 +1,3 @@
+2
+lucen/ABC
+bar/A
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilter.java
new file mode 100644
index 0000000..712ac58
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilter.java
@@ -0,0 +1,126 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+
+import java.io.Reader;
+
+public class TestScandinavianFoldingFilter extends BaseTokenStreamTestCase {
+
+
+  private Analyzer analyzer = new Analyzer() {
+    @Override
+    protected TokenStreamComponents createComponents(String field, Reader reader) {
+      final Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+      final TokenStream stream = new ScandinavianFoldingFilter(tokenizer);
+      return new TokenStreamComponents(tokenizer, stream);
+    }
+  };
+
+  public void test() throws Exception {
+
+    checkOneTerm(analyzer, "aeäaeeea", "aaaeea"); // should not cause ArrayOutOfBoundsException
+
+    checkOneTerm(analyzer, "aeäaeeeae", "aaaeea");
+    checkOneTerm(analyzer, "aeaeeeae", "aaeea");
+
+    checkOneTerm(analyzer, "bøen", "boen");
+    checkOneTerm(analyzer, "åene", "aene");
+
+
+    checkOneTerm(analyzer, "blåbærsyltetøj", "blabarsyltetoj");
+    checkOneTerm(analyzer, "blaabaarsyltetoej", "blabarsyltetoj");
+    checkOneTerm(analyzer, "blåbärsyltetöj", "blabarsyltetoj");
+
+    checkOneTerm(analyzer, "raksmorgas", "raksmorgas");
+    checkOneTerm(analyzer, "räksmörgås", "raksmorgas");
+    checkOneTerm(analyzer, "ræksmørgås", "raksmorgas");
+    checkOneTerm(analyzer, "raeksmoergaas", "raksmorgas");
+    checkOneTerm(analyzer, "ræksmörgaos", "raksmorgas");
+
+
+    checkOneTerm(analyzer, "ab", "ab");
+    checkOneTerm(analyzer, "ob", "ob");
+    checkOneTerm(analyzer, "Ab", "Ab");
+    checkOneTerm(analyzer, "Ob", "Ob");
+
+    checkOneTerm(analyzer, "å", "a");
+
+    checkOneTerm(analyzer, "aa", "a");
+    checkOneTerm(analyzer, "aA", "a");
+    checkOneTerm(analyzer, "ao", "a");
+    checkOneTerm(analyzer, "aO", "a");
+
+    checkOneTerm(analyzer, "AA", "A");
+    checkOneTerm(analyzer, "Aa", "A");
+    checkOneTerm(analyzer, "Ao", "A");
+    checkOneTerm(analyzer, "AO", "A");
+
+    checkOneTerm(analyzer, "æ", "a");
+    checkOneTerm(analyzer, "ä", "a");
+
+    checkOneTerm(analyzer, "Æ", "A");
+    checkOneTerm(analyzer, "Ä", "A");
+
+    checkOneTerm(analyzer, "ae", "a");
+    checkOneTerm(analyzer, "aE", "a");
+
+    checkOneTerm(analyzer, "Ae", "A");
+    checkOneTerm(analyzer, "AE", "A");
+
+
+    checkOneTerm(analyzer, "ö", "o");
+    checkOneTerm(analyzer, "ø", "o");
+    checkOneTerm(analyzer, "Ö", "O");
+    checkOneTerm(analyzer, "Ø", "O");
+
+
+    checkOneTerm(analyzer, "oo", "o");
+    checkOneTerm(analyzer, "oe", "o");
+    checkOneTerm(analyzer, "oO", "o");
+    checkOneTerm(analyzer, "oE", "o");
+
+    checkOneTerm(analyzer, "Oo", "O");
+    checkOneTerm(analyzer, "Oe", "O");
+    checkOneTerm(analyzer, "OO", "O");
+    checkOneTerm(analyzer, "OE", "O");
+  }
+  
+  /** check that the empty string doesn't cause issues */
+  public void testEmptyTerm() throws Exception {
+    Analyzer a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new KeywordTokenizer(reader);
+        return new TokenStreamComponents(tokenizer, new ScandinavianFoldingFilter(tokenizer));
+      } 
+    };
+    checkOneTerm(a, "", "");
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomData() throws Exception {
+    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilterFactory.java
new file mode 100644
index 0000000..4823cf1
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianFoldingFilterFactory.java
@@ -0,0 +1,45 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
+
+import java.io.Reader;
+import java.io.StringReader;
+
+public class TestScandinavianFoldingFilterFactory extends BaseTokenStreamFactoryTestCase {
+
+  public void testStemming() throws Exception {
+    Reader reader = new StringReader("räksmörgås");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("ScandinavianFolding").create(stream);
+    assertTokenStreamContents(stream, new String[] { "raksmorgas" });
+  }
+
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("ScandinavianFolding",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
+}
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilter.java
new file mode 100644
index 0000000..4eff0ca
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilter.java
@@ -0,0 +1,125 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+
+import java.io.Reader;
+
+
+public class TestScandinavianNormalizationFilter extends BaseTokenStreamTestCase {
+
+
+  private Analyzer analyzer = new Analyzer() {
+    @Override
+    protected TokenStreamComponents createComponents(String field, Reader reader) {
+      final Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+      final TokenStream stream = new ScandinavianNormalizationFilter(tokenizer);
+      return new TokenStreamComponents(tokenizer, stream);
+    }
+  };
+
+  public void test() throws Exception {
+
+    checkOneTerm(analyzer, "aeäaeeea", "æææeea"); // should not cause ArrayIndexOutOfBoundsException
+
+    checkOneTerm(analyzer, "aeäaeeeae", "æææeeæ");
+    checkOneTerm(analyzer, "aeaeeeae", "ææeeæ");
+
+    checkOneTerm(analyzer, "bøen", "bøen");
+    checkOneTerm(analyzer, "bOEen", "bØen");
+    checkOneTerm(analyzer, "åene", "åene");
+
+
+    checkOneTerm(analyzer, "blåbærsyltetøj", "blåbærsyltetøj");
+    checkOneTerm(analyzer, "blaabaersyltetöj", "blåbærsyltetøj");
+    checkOneTerm(analyzer, "räksmörgås", "ræksmørgås");
+    checkOneTerm(analyzer, "raeksmörgaos", "ræksmørgås");
+    checkOneTerm(analyzer, "raeksmörgaas", "ræksmørgås");
+    checkOneTerm(analyzer, "raeksmoergås", "ræksmørgås");
+
+
+    checkOneTerm(analyzer, "ab", "ab");
+    checkOneTerm(analyzer, "ob", "ob");
+    checkOneTerm(analyzer, "Ab", "Ab");
+    checkOneTerm(analyzer, "Ob", "Ob");
+
+    checkOneTerm(analyzer, "å", "å");
+
+    checkOneTerm(analyzer, "aa", "å");
+    checkOneTerm(analyzer, "aA", "å");
+    checkOneTerm(analyzer, "ao", "å");
+    checkOneTerm(analyzer, "aO", "å");
+
+    checkOneTerm(analyzer, "AA", "Å");
+    checkOneTerm(analyzer, "Aa", "Å");
+    checkOneTerm(analyzer, "Ao", "Å");
+    checkOneTerm(analyzer, "AO", "Å");
+
+    checkOneTerm(analyzer, "æ", "æ");
+    checkOneTerm(analyzer, "ä", "æ");
+
+    checkOneTerm(analyzer, "Æ", "Æ");
+    checkOneTerm(analyzer, "Ä", "Æ");
+
+    checkOneTerm(analyzer, "ae", "æ");
+    checkOneTerm(analyzer, "aE", "æ");
+
+    checkOneTerm(analyzer, "Ae", "Æ");
+    checkOneTerm(analyzer, "AE", "Æ");
+
+
+    checkOneTerm(analyzer, "ö", "ø");
+    checkOneTerm(analyzer, "ø", "ø");
+    checkOneTerm(analyzer, "Ö", "Ø");
+    checkOneTerm(analyzer, "Ø", "Ø");
+
+
+    checkOneTerm(analyzer, "oo", "ø");
+    checkOneTerm(analyzer, "oe", "ø");
+    checkOneTerm(analyzer, "oO", "ø");
+    checkOneTerm(analyzer, "oE", "ø");
+
+    checkOneTerm(analyzer, "Oo", "Ø");
+    checkOneTerm(analyzer, "Oe", "Ø");
+    checkOneTerm(analyzer, "OO", "Ø");
+    checkOneTerm(analyzer, "OE", "Ø");
+  }
+  
+  /** check that the empty string doesn't cause issues */
+  public void testEmptyTerm() throws Exception {
+    Analyzer a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new KeywordTokenizer(reader);
+        return new TokenStreamComponents(tokenizer, new ScandinavianNormalizationFilter(tokenizer));
+      } 
+    };
+    checkOneTerm(a, "", "");
+  }
+  
+  /** blast some random strings through the analyzer */
+  public void testRandomData() throws Exception {
+    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
+  }
+}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilterFactory.java
new file mode 100644
index 0000000..fa7416b
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestScandinavianNormalizationFilterFactory.java
@@ -0,0 +1,45 @@
+package org.apache.lucene.analysis.miscellaneous;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
+
+import java.io.Reader;
+import java.io.StringReader;
+
+public class TestScandinavianNormalizationFilterFactory extends BaseTokenStreamFactoryTestCase {
+
+  public void testStemming() throws Exception {
+    Reader reader = new StringReader("räksmörgås");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("ScandinavianNormalization").create(stream);
+    assertTokenStreamContents(stream, new String[] { "ræksmørgås" });
+  }
+
+  /** Test that bogus arguments result in exception */
+  public void testBogusArguments() throws Exception {
+    try {
+      tokenFilterFactory("ScandinavianNormalization",
+          "bogusArg", "bogusValue");
+      fail();
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.getMessage().contains("Unknown parameters"));
+    }
+  }
+}
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
index 3e34230..f3739e6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
@@ -78,7 +78,7 @@
       char[] charArray = randomRealisticUnicodeString.toCharArray();
       StringBuilder builder = new StringBuilder();
       for (int j = 0; j < charArray.length;) {
-        int cp = Character.codePointAt(charArray, j);
+        int cp = Character.codePointAt(charArray, j, charArray.length);
         if (!Character.isWhitespace(cp)) {
           builder.appendCodePoint(cp);
         }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
index 6b3d8c5..7aeec71 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
@@ -32,8 +32,10 @@
 import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
 
 /**
  * Tests {@link EdgeNGramTokenFilter} for correctness.
@@ -192,9 +194,9 @@
   }
 
   public void testGraphs() throws IOException {
-    TokenStream tk = new LetterTokenizer(Version.LUCENE_44, new StringReader("abc d efgh ij klmno p q"));
+    TokenStream tk = new LetterTokenizer(TEST_VERSION_CURRENT, new StringReader("abc d efgh ij klmno p q"));
     tk = new ShingleFilter(tk);
-    tk = new EdgeNGramTokenFilter(Version.LUCENE_44, tk, 7, 10);
+    tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, 7, 10);
     tk.reset();
     assertTokenStreamContents(tk,
         new String[] { "efgh ij", "ij klmn", "ij klmno", "klmno p" },
@@ -205,4 +207,25 @@
         23
     );
   }
+
+  public void testSupplementaryCharacters() throws IOException {
+    final String s = _TestUtil.randomUnicodeString(random(), 10);
+    final int codePointCount = s.codePointCount(0, s.length());
+    final int minGram = _TestUtil.nextInt(random(), 1, 3);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 10);
+    TokenStream tk = new KeywordTokenizer(new StringReader(s));
+    tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
+    final CharTermAttribute termAtt = tk.addAttribute(CharTermAttribute.class);
+    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
+    tk.reset();
+    for (int i = minGram; i <= Math.min(codePointCount, maxGram); ++i) {
+      assertTrue(tk.incrementToken());
+      assertEquals(0, offsetAtt.startOffset());
+      assertEquals(s.length(), offsetAtt.endOffset());
+      final int end = Character.offsetByCodePoints(s, 0, i);
+      assertEquals(s.substring(0, end), termAtt.toString());
+    }
+    assertFalse(tk.incrementToken());
+  }
+
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
index 4db7efe..bd2da79 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
@@ -21,15 +21,15 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.util._TestUtil;
 
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
 /**
  * Tests {@link EdgeNGramTokenizer} for correctness.
  */
@@ -120,25 +120,60 @@
                               false);
   }
 
+  private static void testNGrams(int minGram, int maxGram, int length, final String nonTokenChars) throws IOException {
+    final String s = RandomStrings.randomAsciiOfLength(random(), length);
+    testNGrams(minGram, maxGram, s, nonTokenChars);
+  }
+
+  private static void testNGrams(int minGram, int maxGram, String s, String nonTokenChars) throws IOException {
+    NGramTokenizerTest.testNGrams(minGram, maxGram, s, nonTokenChars, true);
+  }
+
   public void testLargeInput() throws IOException {
-    final String input = _TestUtil.randomSimpleString(random(), 1024 * 5);
-    final int minGram = _TestUtil.nextInt(random(), 1, 1024);
-    final int maxGram = _TestUtil.nextInt(random(), minGram, 5 * 1024);
-    EdgeNGramTokenizer tk = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, new StringReader(input), minGram, maxGram);
-    final CharTermAttribute charTermAtt = tk.addAttribute(CharTermAttribute.class);
-    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
-    final PositionIncrementAttribute posIncAtt = tk.addAttribute(PositionIncrementAttribute.class);
-    tk.reset();
-    for (int i = minGram; i <= maxGram && i <= input.length(); ++i) {
-      assertTrue(tk.incrementToken());
-      assertEquals(0, offsetAtt.startOffset());
-      assertEquals(i, offsetAtt.endOffset());
-      assertEquals(1, posIncAtt.getPositionIncrement());
-      assertEquals(input.substring(0, i), charTermAtt.toString());
+    // test sliding
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+  }
+
+  public void testLargeMaxGram() throws IOException {
+    // test sliding with maxGram > 1024
+    final int minGram = _TestUtil.nextInt(random(), 1290, 1300);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 1300);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+  }
+
+  public void testPreTokenization() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a");
+  }
+
+  public void testHeavyPreTokenization() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
+  }
+
+  public void testFewTokenChars() throws IOException {
+    final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)];
+    Arrays.fill(chrs, ' ');
+    for (int i = 0; i < chrs.length; ++i) {
+      if (random().nextFloat() < 0.1) {
+        chrs[i] = 'a';
+      }
     }
-    assertFalse(tk.incrementToken());
-    tk.end();
-    assertEquals(input.length(), offsetAtt.startOffset());
+    final int minGram = _TestUtil.nextInt(random(), 1, 2);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 2);
+    testNGrams(minGram, maxGram, new String(chrs), " ");
+  }
+
+  public void testFullUTF8Range() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024);
+    testNGrams(minGram, maxGram, s, "");
+    testNGrams(minGram, maxGram, s, "abcdef");
   }
 
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
index 37db05d..20c1820 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
@@ -26,7 +26,10 @@
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
 
 import java.io.IOException;
 import java.io.Reader;
@@ -177,4 +180,27 @@
         );
   }
 
+  public void testSupplementaryCharacters() throws IOException {
+    final String s = _TestUtil.randomUnicodeString(random(), 10);
+    final int codePointCount = s.codePointCount(0, s.length());
+    final int minGram = _TestUtil.nextInt(random(), 1, 3);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 10);
+    TokenStream tk = new KeywordTokenizer(new StringReader(s));
+    tk = new NGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
+    final CharTermAttribute termAtt = tk.addAttribute(CharTermAttribute.class);
+    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
+    tk.reset();
+    for (int start = 0; start < codePointCount; ++start) {
+      for (int end = start + minGram; end <= Math.min(codePointCount, start + maxGram); ++end) {
+        assertTrue(tk.incrementToken());
+        assertEquals(0, offsetAtt.startOffset());
+        assertEquals(s.length(), offsetAtt.endOffset());
+        final int startIndex = Character.offsetByCodePoints(s, 0, start);
+        final int endIndex = Character.offsetByCodePoints(s, 0, end);
+        assertEquals(s.substring(startIndex, endIndex), termAtt.toString());
+      }
+    }
+    assertFalse(tk.incrementToken());
+  }
+
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
index f56f413..0dbdefd 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
@@ -18,9 +18,12 @@
  */
 
 
+import static org.apache.lucene.analysis.ngram.NGramTokenizerTest.isTokenChar;
+
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -115,23 +118,74 @@
     checkRandomData(random(), a, 50*RANDOM_MULTIPLIER, 1027, false, false);
   }
 
-  private void testNGrams(int minGram, int maxGram, int length) throws IOException {
+  private static void testNGrams(int minGram, int maxGram, int length, final String nonTokenChars) throws IOException {
     final String s = RandomStrings.randomAsciiOfLength(random(), length);
-    final TokenStream grams = new NGramTokenizer(TEST_VERSION_CURRENT, new StringReader(s), minGram, maxGram);
+    testNGrams(minGram, maxGram, s, nonTokenChars);
+  }
+
+  private static void testNGrams(int minGram, int maxGram, String s, String nonTokenChars) throws IOException {
+    testNGrams(minGram, maxGram, s, nonTokenChars, false);
+  }
+
+  static int[] toCodePoints(CharSequence s) {
+    final int[] codePoints = new int[Character.codePointCount(s, 0, s.length())];
+    for (int i = 0, j = 0; i < s.length(); ++j) {
+      codePoints[j] = Character.codePointAt(s, i);
+      i += Character.charCount(codePoints[j]);
+    }
+    return codePoints;
+  }
+
+  static boolean isTokenChar(String nonTokenChars, int codePoint) {
+    for (int i = 0; i < nonTokenChars.length(); ) {
+      final int cp = nonTokenChars.codePointAt(i);
+      if (cp == codePoint) {
+        return false;
+      }
+      i += Character.charCount(cp);
+    }
+    return true;
+  }
+
+  static void testNGrams(int minGram, int maxGram, String s, final String nonTokenChars, boolean edgesOnly) throws IOException {
+    // convert the string to code points
+    final int[] codePoints = toCodePoints(s);
+    final int[] offsets = new int[codePoints.length + 1];
+    for (int i = 0; i < codePoints.length; ++i) {
+      offsets[i+1] = offsets[i] + Character.charCount(codePoints[i]);
+    }
+    final TokenStream grams = new NGramTokenizer(TEST_VERSION_CURRENT, new StringReader(s), minGram, maxGram, edgesOnly) {
+      @Override
+      protected boolean isTokenChar(int chr) {
+        return nonTokenChars.indexOf(chr) < 0;
+      }
+    };
     final CharTermAttribute termAtt = grams.addAttribute(CharTermAttribute.class);
     final PositionIncrementAttribute posIncAtt = grams.addAttribute(PositionIncrementAttribute.class);
     final PositionLengthAttribute posLenAtt = grams.addAttribute(PositionLengthAttribute.class);
     final OffsetAttribute offsetAtt = grams.addAttribute(OffsetAttribute.class);
     grams.reset();
-    for (int start = 0; start < s.length(); ++start) {
-      for (int end = start + minGram; end <= start + maxGram && end <= s.length(); ++end) {
+    for (int start = 0; start < codePoints.length; ++start) {
+      nextGram:
+      for (int end = start + minGram; end <= start + maxGram && end <= codePoints.length; ++end) {
+        if (edgesOnly && start > 0 && isTokenChar(nonTokenChars, codePoints[start - 1])) {
+          // not on an edge
+          continue nextGram;
+        }
+        for (int j = start; j < end; ++j) {
+          if (!isTokenChar(nonTokenChars, codePoints[j])) {
+            continue nextGram;
+          }
+        }
         assertTrue(grams.incrementToken());
-        assertEquals(s.substring(start, end), termAtt.toString());
+        assertArrayEquals(Arrays.copyOfRange(codePoints, start, end), toCodePoints(termAtt));
         assertEquals(1, posIncAtt.getPositionIncrement());
-        assertEquals(start, offsetAtt.startOffset());
-        assertEquals(end, offsetAtt.endOffset());
+        assertEquals(1, posLenAtt.getPositionLength());
+        assertEquals(offsets[start], offsetAtt.startOffset());
+        assertEquals(offsets[end], offsetAtt.endOffset());
       }
     }
+    assertFalse(grams.incrementToken());
     grams.end();
     assertEquals(s.length(), offsetAtt.startOffset());
     assertEquals(s.length(), offsetAtt.endOffset());
@@ -141,14 +195,47 @@
     // test sliding
     final int minGram = _TestUtil.nextInt(random(), 1, 100);
     final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024));
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
   }
 
   public void testLargeMaxGram() throws IOException {
     // test sliding with maxGram > 1024
-    final int minGram = _TestUtil.nextInt(random(), 1200, 1300);
+    final int minGram = _TestUtil.nextInt(random(), 1290, 1300);
     final int maxGram = _TestUtil.nextInt(random(), minGram, 1300);
-    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024));
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 3 * 1024, 4 * 1024), "");
+  }
+
+  public void testPreTokenization() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "a");
+  }
+
+  public void testHeavyPreTokenization() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    testNGrams(minGram, maxGram, _TestUtil.nextInt(random(), 0, 4 * 1024), "abcdef");
+  }
+
+  public void testFewTokenChars() throws IOException {
+    final char[] chrs = new char[_TestUtil.nextInt(random(), 4000, 5000)];
+    Arrays.fill(chrs, ' ');
+    for (int i = 0; i < chrs.length; ++i) {
+      if (random().nextFloat() < 0.1) {
+        chrs[i] = 'a';
+      }
+    }
+    final int minGram = _TestUtil.nextInt(random(), 1, 2);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 2);
+    testNGrams(minGram, maxGram, new String(chrs), " ");
+  }
+
+  public void testFullUTF8Range() throws IOException {
+    final int minGram = _TestUtil.nextInt(random(), 1, 100);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 100);
+    final String s = _TestUtil.randomUnicodeString(random(), 4 * 1024);
+    testNGrams(minGram, maxGram, s, "");
+    testNGrams(minGram, maxGram, s, "abcdef");
   }
 
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java
index c0212a6..55eefe1 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilter.java
@@ -32,6 +32,9 @@
 import org.apache.lucene.analysis.util.CharArraySet;
 
 import static org.apache.lucene.analysis.VocabularyAssert.*;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.BOKMAAL;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.NYNORSK;
+
 
 /**
  * Simple tests for {@link NorwegianLightStemFilter}
@@ -42,7 +45,7 @@
     protected TokenStreamComponents createComponents(String fieldName,
         Reader reader) {
       Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(source, new NorwegianLightStemFilter(source));
+      return new TokenStreamComponents(source, new NorwegianLightStemFilter(source, BOKMAAL));
     }
   };
   
@@ -51,6 +54,18 @@
     assertVocabulary(analyzer, new FileInputStream(getDataFile("nb_light.txt")));
   }
   
+  /** Test against a Nynorsk vocabulary file */
+  public void testNynorskVocabulary() throws IOException {  
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(source, new NorwegianLightStemFilter(source, NYNORSK));
+      }
+    };
+    assertVocabulary(analyzer, new FileInputStream(getDataFile("nn_light.txt")));
+  }
+  
   public void testKeyword() throws IOException {
     final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("sekretæren"), false);
     Analyzer a = new Analyzer() {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java
index fe5f62f..df61985 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java
@@ -34,7 +34,23 @@
     stream = tokenFilterFactory("NorwegianLightStem").create(stream);
     assertTokenStreamContents(stream, new String[] { "epl", "epl" });
   }
-  
+
+  /** Test stemming with variant set explicitly to Bokmål */
+  public void testBokmaalStemming() throws Exception {
+    Reader reader = new StringReader("epler eple");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianLightStem", "variant", "nb").create(stream);
+    assertTokenStreamContents(stream, new String[] { "epl", "epl" });
+  }
+
+  /** Test stemming with variant set explicitly to Nynorsk */
+  public void testNynorskStemming() throws Exception {
+    Reader reader = new StringReader("gutar gutane");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianLightStem", "variant", "nn").create(stream);
+    assertTokenStreamContents(stream, new String[] { "gut", "gut" });
+  }
+
   /** Test that bogus arguments result in exception */
   public void testBogusArguments() throws Exception {
     try {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java
index 69cee77..38866f6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilter.java
@@ -32,6 +32,8 @@
 import org.apache.lucene.analysis.util.CharArraySet;
 
 import static org.apache.lucene.analysis.VocabularyAssert.*;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.BOKMAAL;
+import static org.apache.lucene.analysis.no.NorwegianLightStemmer.NYNORSK;
 
 /**
  * Simple tests for {@link NorwegianMinimalStemFilter}
@@ -42,15 +44,27 @@
     protected TokenStreamComponents createComponents(String fieldName,
         Reader reader) {
       Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-      return new TokenStreamComponents(source, new NorwegianMinimalStemFilter(source));
+      return new TokenStreamComponents(source, new NorwegianMinimalStemFilter(source, BOKMAAL));
     }
   };
   
-  /** Test against a vocabulary file */
+  /** Test against a Bokmål vocabulary file */
   public void testVocabulary() throws IOException {
     assertVocabulary(analyzer, new FileInputStream(getDataFile("nb_minimal.txt")));
   }
   
+  /** Test against a Nynorsk vocabulary file */
+  public void testNynorskVocabulary() throws IOException {  
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+        return new TokenStreamComponents(source, new NorwegianMinimalStemFilter(source, NYNORSK));
+      }
+    };
+    assertVocabulary(analyzer, new FileInputStream(getDataFile("nn_minimal.txt")));
+  }
+  
   public void testKeyword() throws IOException {
     final CharArraySet exclusionSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("sekretæren"), false);
     Analyzer a = new Analyzer() {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java
index 9c03225..80b4ddd 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java
@@ -35,6 +35,22 @@
     assertTokenStreamContents(stream, new String[] { "epl", "epl", "epl", "epl", "epl", "epl" });
   }
   
+  /** Test stemming with variant set explicitly to Bokmål */
+  public void testBokmaalStemming() throws Exception {
+    Reader reader = new StringReader("eple eplet epler eplene eplets eplenes");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianMinimalStem", "variant", "nb").create(stream);
+    assertTokenStreamContents(stream, new String[] { "epl", "epl", "epl", "epl", "epl", "epl" });
+  }
+  
+  /** Test stemming with variant set explicitly to Nynorsk */
+  public void testNynorskStemming() throws Exception {
+    Reader reader = new StringReader("gut guten gutar gutane gutens gutanes");
+    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+    stream = tokenFilterFactory("NorwegianMinimalStem", "variant", "nn").create(stream);
+    assertTokenStreamContents(stream, new String[] { "gut", "gut", "gut", "gut", "gut", "gut" });
+  }
+  
   /** Test that bogus arguments result in exception */
   public void testBogusArguments() throws Exception {
     try {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_light.txt b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_light.txt
new file mode 100644
index 0000000..154da9e
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_light.txt
@@ -0,0 +1,110 @@
+#
+# Tests for Norwegian Nynorsk light stemmer
+# It should tackle nouns, adjectives, genitives and some general endings
+#
+# Nouns masculine
+gut	gut
+guten	gut
+gutar	gut
+gutane	gut
+gutens	gut
+gutanes	gut
+søknad	søknad
+søknaden	søknad
+søknadar	søknad
+søknadane	søknad
+søknadens	søknad
+søknadanes	søknad
+# Nouns feminine
+kjole	kjol
+kjola	kjol
+kjoler	kjol
+kjolene	kjol
+kjolas	kjol
+# Nouns neutral
+dyr	dyr
+dyret	dyr
+dyra	dyr
+dyras	dyr
+prospekt	prospekt
+prospektet	prospekt
+prospekta	prospekt
+prospektas	prospekt
+innhald	innhald
+innhaldet	innhald
+innhalda	innhald
+# General endings
+hemmeleg	hemmeleg
+hemmelegheit	hemmeleg
+hemmelegheita	hemmeleg
+hemmelegheiter	hemmeleg
+vanskeleg	vanskeleg
+vanskelegheit	vanskeleg
+vanskelegheita	vanskeleg
+vanskelegheiter	vanskeleg
+hevelse	hev
+heva	hev
+hevelsen	hev
+heve	hev
+ærleg	ærleg
+ærlegdom	ærleg
+ærlegdommen	ærlegdomm
+ærlegdommens	ærlegdomm
+alderdom	alder
+alderdommen	alderdomm
+alderdommens	alderdomm
+trygg	trygg
+tryggleik	trygg
+tryggleiken	trygg
+tryggleikens	trygg
+tryggleikar	trygg
+kjærleik	kjær
+kjærleiken	kjær
+kjærleikens	kjær
+kjærleikar	kjær
+verke	verk
+verksemd	verk
+hjelpe	hjelp
+hjelpsemd	hjelp
+# Adjectives
+billeg	billeg
+billegare	billeg
+billegast	billeg
+smal	smal
+smalare	smal
+smalast	smal
+farleg	farleg
+farlegare	farleg
+farlegast	farleg
+#########################################
+# Words that should not be stemmed
+#
+# Irregular masculine nouns (not supposed to be handled correctly)
+søner	søn
+sønene	søn
+brør	brør
+brørne	brørn
+# Irregular feminine nouns, not handled
+dotter	dott
+døtrer	døtr
+døtrene	døtr
+klo	klo
+klørne	klørn
+mor	mor
+mødrer	mødr
+mødrene	mødr
+# Irregular neutral nouns, not handled
+vedunder	vedund
+# Other words that should not be touched
+abc	abc
+123	123
+Jens	Jens
+# Irregular adjectives that should not be stemmed
+gammal	gammal
+eldre	eldr
+eldst	eldst
+# Verbs, should not be stemmed
+syngje	syngj
+syng	syng
+song	song
+sunge	sung
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_minimal.txt b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_minimal.txt
new file mode 100644
index 0000000..0ee8946
--- /dev/null
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/no/nn_minimal.txt
@@ -0,0 +1,76 @@
+#
+# Tests for Norwegian minimal stemmer using Nynorsk as variant
+# It only tries to stem nouns, i.e. it is only minimally aggressive
+#
+# Nouns masculine
+gut	gut
+guten	gut
+gutar	gut
+gutane	gut
+gutens	gut
+gutanes	gut
+søknad	søknad
+søknaden	søknad
+søknadar	søknad
+søknadane	søknad
+søknadens	søknad
+søknadanes	søknad
+# Nouns feminine
+jente	jent
+jenta	jent
+jenter	jent
+jentene	jent
+jentas	jent
+# Nouns neutral
+dyr	dyr
+dyret	dyr
+dyra	dyr
+dyras	dyr
+prospekt	prospekt
+prospektet	prospekt
+prospekta	prospekt
+prospektas	prospekt
+innhald	innhald
+innhaldet	innhald
+innhalda	innhald
+#########################################
+# Words that should not be stemmed
+#
+# Irregular masculine nouns (not supposed to be handled correctly)
+søner	søn
+sønene	søn
+brør	brør
+brørne	brørn
+# Irregular feminine nouns, not handled
+dotter	dott
+døtrer	døtr
+døtrene	døtr
+klo	klo
+klørne	klørn
+mor	mor
+mødrer	mødr
+mødrene	mødr
+# Irregular neutral nouns, not handled
+vedunder	vedund
+# Other words that should not be touched
+abc	abc
+123	123
+Jens	Jens
+# Adjectives, should not be stemmed
+farleg	farleg
+farlegare	farlegar
+farlegast	farlegast
+stor	stor
+større	størr
+størst	størst
+gammal	gammal
+eldre	eldr
+eldst	eldst
+# General endings, should not be stemmed
+sanning	sanning
+sanninga	sanning
+# Verbs, should not be stemmed
+syngje	syngj
+syng	syng
+song	song
+sunge	sung
\ No newline at end of file
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java
index 502223f..3c88454 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharacterUtils.java
@@ -20,10 +20,11 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.util.Arrays;
 
 import org.apache.lucene.analysis.util.CharacterUtils.CharacterBuffer;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
 import org.junit.Test;
 
 /**
@@ -32,32 +33,6 @@
 public class TestCharacterUtils extends LuceneTestCase {
 
   @Test
-  public void testCodePointAtCharArrayInt() {
-    CharacterUtils java4 = CharacterUtils.getJava4Instance();
-    char[] cpAt3 = "Abc\ud801\udc1c".toCharArray();
-    char[] highSurrogateAt3 = "Abc\ud801".toCharArray();
-    assertEquals((int) 'A', java4.codePointAt(cpAt3, 0));
-    assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3));
-    assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3));
-    try {
-      java4.codePointAt(highSurrogateAt3, 4);
-      fail("array index out of bounds");
-    } catch (IndexOutOfBoundsException e) {
-    }
-
-    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
-    assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
-    assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
-        cpAt3, 3));
-    assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3));
-    try {
-      java5.codePointAt(highSurrogateAt3, 4);
-      fail("array index out of bounds");
-    } catch (IndexOutOfBoundsException e) {
-    }
-  }
-
-  @Test
   public void testCodePointAtCharSequenceInt() {
     CharacterUtils java4 = CharacterUtils.getJava4Instance();
     String cpAt3 = "Abc\ud801\udc1c";
@@ -98,7 +73,68 @@
     assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
         cpAt3, 3, 5));
     assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3, 4));
+  }
 
+  @Test
+  public void testCodePointCount() {
+    CharacterUtils java4 = CharacterUtils.getJava4Instance();
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    final String s = _TestUtil.randomUnicodeString(random());
+    assertEquals(s.length(), java4.codePointCount(s));
+    assertEquals(Character.codePointCount(s, 0, s.length()), java5.codePointCount(s));
+  }
+
+  @Test
+  public void testOffsetByCodePoint() {
+    CharacterUtils java4 = CharacterUtils.getJava4Instance();
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    for (int i = 0; i < 10; ++i) {
+      final char[] s = _TestUtil.randomUnicodeString(random()).toCharArray();
+      final int index = _TestUtil.nextInt(random(), 0, s.length);
+      final int offset = random().nextInt(7) - 3;
+      try {
+        final int o = java4.offsetByCodePoints(s, 0, s.length, index, offset);
+        assertEquals(o, index + offset);
+      } catch (IndexOutOfBoundsException e) {
+        assertTrue((index + offset) < 0 || (index + offset) > s.length);
+      }
+  
+      int o;
+      try {
+        o = java5.offsetByCodePoints(s, 0, s.length, index, offset);
+      } catch (IndexOutOfBoundsException e) {
+        try {
+          Character.offsetByCodePoints(s, 0, s.length, index, offset);
+          fail();
+        } catch (IndexOutOfBoundsException e2) {
+          // OK
+        }
+        o = -1;
+      }
+      if (o >= 0) {
+        assertEquals(Character.offsetByCodePoints(s, 0, s.length, index, offset), o);
+      }
+    }
+  }
+
+  public void testConversions() {
+    CharacterUtils java4 = CharacterUtils.getJava4Instance();
+    CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
+    testConversions(java4);
+    testConversions(java5);
+  }
+
+  private void testConversions(CharacterUtils charUtils) {
+    final char[] orig = _TestUtil.randomUnicodeString(random(), 100).toCharArray();
+    final int[] buf = new int[orig.length];
+    final char[] restored = new char[buf.length];
+    final int o1 = _TestUtil.nextInt(random(), 0, Math.min(5, orig.length));
+    final int o2 = _TestUtil.nextInt(random(), 0, o1);
+    final int o3 = _TestUtil.nextInt(random(), 0, o1);
+    final int codePointCount = charUtils.toCodePoints(orig, o1, orig.length - o1, buf, o2);
+    final int charCount = charUtils.toChars(buf, o2, codePointCount, restored, o3);
+    assertEquals(orig.length - o1, charCount);
+    assertArrayEquals(Arrays.copyOfRange(orig, o1, o1 + charCount), Arrays.copyOfRange(restored, o3, o3 + charCount));
   }
 
   @Test
@@ -132,7 +168,7 @@
       assertEquals(0, buffer.getOffset());
       assertEquals(6, buffer.getLength());
       assertEquals("hellow", new String(buffer.getBuffer()));
-      assertTrue(instance.fill(buffer,reader));
+      assertFalse(instance.fill(buffer,reader));
       assertEquals(4, buffer.getLength());
       assertEquals(0, buffer.getOffset());
 
@@ -159,15 +195,12 @@
     assertEquals(4, buffer.getLength());
     assertEquals("123\ud801", new String(buffer.getBuffer(),
         buffer.getOffset(), buffer.getLength()));
-    assertTrue(instance.fill(buffer, reader));
-    assertEquals(2, buffer.getLength());
-    assertEquals("\ud801\udc1c", new String(buffer.getBuffer(), buffer
-        .getOffset(), buffer.getLength()));
-    assertTrue(instance.fill(buffer, reader));
-    assertEquals(1, buffer.getLength());
-    assertEquals("\ud801", new String(buffer.getBuffer(), buffer
+    assertFalse(instance.fill(buffer, reader));
+    assertEquals(3, buffer.getLength());
+    assertEquals("\ud801\udc1c\ud801", new String(buffer.getBuffer(), buffer
         .getOffset(), buffer.getLength()));
     assertFalse(instance.fill(buffer, reader));
+    assertEquals(0, buffer.getLength());
   }
 
   @Test
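
For context on the fill() assertions above: after this change fill() reports whether the buffer could be filled completely, so a false return signals the reader is exhausted while the buffer may still hold a final, shorter chunk (and a trailing high surrogate is kept with its chunk rather than split off). A consumer loop under that reading, sketched against the 4.x CharacterUtils API (the Version constant is illustrative):

    // Assumed imports: java.io.Reader, java.io.StringReader,
    // org.apache.lucene.analysis.util.CharacterUtils (and its CharacterBuffer),
    // org.apache.lucene.util.Version.
    CharacterUtils utils = CharacterUtils.getInstance(Version.LUCENE_44);
    CharacterUtils.CharacterBuffer buf = CharacterUtils.newCharacterBuffer(6);
    Reader reader = new StringReader("helloworld");
    boolean hasMore;
    do {
      hasMore = utils.fill(buf, reader);
      // Consume buf.getLength() chars starting at buf.getOffset();
      // a false return may still deliver trailing chars.
      System.out.println(new String(buf.getBuffer(), buf.getOffset(), buf.getLength()));
    } while (hasMore);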
diff --git a/lucene/analysis/icu/build.xml b/lucene/analysis/icu/build.xml
index fcd9201..5d3c76d 100644
--- a/lucene/analysis/icu/build.xml
+++ b/lucene/analysis/icu/build.xml
@@ -23,6 +23,8 @@
    Analysis integration with ICU (International Components for Unicode).
   </description>
 
+  <property name="rat.additional-includes" value="src/tools/**"/>
+
   <import file="../analysis-module-build.xml"/>
 
   <path id="icujar">
@@ -59,7 +61,7 @@
   <property name="gennorm2.src.files"
   	value="nfc.txt nfkc.txt nfkc_cf.txt BasicFoldings.txt DiacriticFolding.txt DingbatFolding.txt HanRadicalFolding.txt NativeDigitFolding.txt"/>
   <property name="gennorm2.tmp" value="${build.dir}/gennorm2/utr30.tmp"/>
-  <property name="gennorm2.dst" value="src/resources/org/apache/lucene/analysis/icu/utr30.nrm"/>
+  <property name="gennorm2.dst" value="${resources.dir}/org/apache/lucene/analysis/icu/utr30.nrm"/>
   <target name="gennorm2" depends="gen-utr30-data-files">
     <echo>Note that the gennorm2 and icupkg tools must be on your PATH. These tools
 are part of the ICU4C package. See http://site.icu-project.org/ </echo>
@@ -82,7 +84,7 @@
   </target>
   
   <property name="rbbi.src.dir" location="src/data/uax29"/>
-  <property name="rbbi.dst.dir" location="src/resources/org/apache/lucene/analysis/icu/segmentation"/>
+  <property name="rbbi.dst.dir" location="${resources.dir}/org/apache/lucene/analysis/icu/segmentation"/>
 		
   <target name="genrbbi" depends="compile-tools">
     <mkdir dir="${rbbi.dst.dir}"/>
diff --git a/lucene/analysis/kuromoji/build.xml b/lucene/analysis/kuromoji/build.xml
index 7ec776b..2428bac 100644
--- a/lucene/analysis/kuromoji/build.xml
+++ b/lucene/analysis/kuromoji/build.xml
@@ -26,6 +26,7 @@
   <!-- currently whether rat detects this as binary or not
        is platform dependent?! -->
   <property name="rat.excludes" value="**/*.txt,**/bocchan.utf-8"/>
+  <property name="rat.additional-includes" value="src/tools/**"/>
 
   <!-- we don't want to pull in ipadic/naist etc -->
   <property name="ivy.default.configuration" value="default"/>
@@ -45,7 +46,7 @@
   <property name="dict.encoding" value="euc-jp"/>
   <property name="dict.format" value="ipadic"/>
   <property name="dict.normalize" value="false"/>
-  <property name="dict.target.dir" location="./src/resources"/>
+  <property name="dict.target.dir" location="${resources.dir}"/>
 
 
   <available type="dir" file="${build.dir}/${ipadic.version}" property="dict.available"/>
@@ -83,7 +84,7 @@
   <target name="build-dict" depends="compile-tools, download-dict">
     <sequential>
       <delete verbose="true">
-        <fileset dir="src/resources/org/apache/lucene/analysis/ja/dict" includes="**/*"/>
+        <fileset dir="${resources.dir}/org/apache/lucene/analysis/ja/dict" includes="**/*"/>
       </delete>
       <!-- TODO: optimize the dictionary construction a bit so that you don't need 1G -->
       <java fork="true" failonerror="true" maxmemory="1g" classname="org.apache.lucene.analysis.ja.util.DictionaryBuilder">
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java
index fd7a676..6edcf34 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java
@@ -44,7 +44,7 @@
     try {
       is = getResource(FST_FILENAME_SUFFIX);
       is = new BufferedInputStream(is);
-      fst = new FST<Long>(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton(true));
+      fst = new FST<Long>(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton());
     } catch (IOException ioe) {
       priorE = ioe;
     } finally {
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java
index 3ff5e64..10df235 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java
@@ -88,7 +88,7 @@
     List<String> data = new ArrayList<String>(featureEntries.size());
     List<int[]> segmentations = new ArrayList<int[]>(featureEntries.size());
     
-    PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(true);
+    PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
     Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, fstOutput);
     IntsRef scratch = new IntsRef();
     long ord = 0;
diff --git a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
index bec0c87..253bc87 100644
--- a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
+++ b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java
@@ -131,7 +131,7 @@
     
     System.out.println("  encode...");
 
-    PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(true);
+    PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
     Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15);
     IntsRef scratch = new IntsRef();
     long ord = -1; // first ord will be 0
diff --git a/lucene/analysis/morfologik/ivy.xml b/lucene/analysis/morfologik/ivy.xml
index 5d93ef3..0c9c337 100644
--- a/lucene/analysis/morfologik/ivy.xml
+++ b/lucene/analysis/morfologik/ivy.xml
@@ -19,9 +19,9 @@
 <ivy-module version="2.0">
     <info organisation="org.apache.lucene" module="analyzers-morfologik"/>
     <dependencies>
-      <dependency org="org.carrot2" name="morfologik-polish" rev="1.5.5" transitive="false"/>
-      <dependency org="org.carrot2" name="morfologik-fsa" rev="1.5.5" transitive="false"/>
-      <dependency org="org.carrot2" name="morfologik-stemming" rev="1.5.5" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-polish" rev="1.6.0" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-fsa" rev="1.6.0" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-stemming" rev="1.6.0" transitive="false"/>
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
 </ivy-module>
diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java
index 081dbe6..6205d38 100644
--- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java
+++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java
@@ -26,38 +26,21 @@
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.util.Version;
 
-import morfologik.stemming.PolishStemmer.DICTIONARY;
-
 /**
  * {@link org.apache.lucene.analysis.Analyzer} using Morfologik library.
  * @see <a href="http://morfologik.blogspot.com/">Morfologik project page</a>
  */
 public class MorfologikAnalyzer extends Analyzer {
-
-  private final DICTIONARY dictionary;
   private final Version version;
 
   /**
-   * Builds an analyzer for a given PolishStemmer.DICTIONARY enum.
+   * Builds an analyzer with Morfologik's default dictionary (polimorf).
    * 
-   * @param vers
-   *          lucene compatibility version
-   * @param dict
-   *          A constant specifying which dictionary to choose. See the
-   *          Morfologik documentation for details or use the default.
+   * @param version
+   *          Lucene compatibility version
    */
-  public MorfologikAnalyzer(final Version vers, final DICTIONARY dict) {
-    this.version = vers;
-    this.dictionary = dict;
-  }
-
-  /**
-   * Builds an analyzer for an original MORFOLOGIK dictionary.
-   * 
-   * @param vers         lucene compatibility version
-   */
-  public MorfologikAnalyzer(final Version vers) {
-    this(vers, DICTIONARY.MORFOLOGIK);
+  public MorfologikAnalyzer(final Version version) {
+    this.version = version;
   }
 
   /**
@@ -78,7 +61,7 @@
     final Tokenizer src = new StandardTokenizer(this.version, reader);
     
     return new TokenStreamComponents(
-      src,
-      new MorfologikFilter(new StandardFilter(this.version, src), this.dictionary, this.version));
+        src, 
+        new MorfologikFilter(new StandardFilter(this.version, src), this.version));
   }
 }
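
With the dictionary enum gone, construction takes only a Version; a usage sketch (the Version constant, field name, and input are illustrative):

    // Assumed imports: java.io.StringReader, org.apache.lucene.analysis.TokenStream,
    // org.apache.lucene.analysis.morfologik.MorfologikAnalyzer, org.apache.lucene.util.Version.
    MorfologikAnalyzer analyzer = new MorfologikAnalyzer(Version.LUCENE_44);
    TokenStream ts = analyzer.tokenStream("body", new StringReader("jest w domu"));
    // Emitted tokens are lemmas; each carries a MorphosyntacticTagsAttribute with its POS tags.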
diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java
index 5be56d2..049dad1 100644
--- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java
+++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java
@@ -22,20 +22,21 @@
 import java.util.*;
 
 import morfologik.stemming.*;
-import morfologik.stemming.PolishStemmer.DICTIONARY;
 
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.util.CharacterUtils;
 import org.apache.lucene.util.*;
 
 /**
- * {@link TokenFilter} using Morfologik library.
+ * {@link TokenFilter} using the Morfologik library to transform input tokens into lemma and
+ * morphosyntactic (POS) tokens. Applies to Polish only.
  *
- * MorfologikFilter contains a {@link MorphosyntacticTagsAttribute}, which provides morphosyntactic
- * annotations for produced lemmas. See the Morfologik documentation for details.
+ * <p>MorfologikFilter contains a {@link MorphosyntacticTagsAttribute}, which provides morphosyntactic
+ * annotations for produced lemmas. See the Morfologik documentation for details.</p>
  * 
  * @see <a href="http://morfologik.blogspot.com/">Morfologik project page</a>
  */
@@ -44,6 +45,7 @@
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final MorphosyntacticTagsAttribute tagsAtt = addAttribute(MorphosyntacticTagsAttribute.class);
   private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
 
   private final CharsRef scratch = new CharsRef(0);
   private final CharacterUtils charUtils;
@@ -58,13 +60,11 @@
   private int lemmaListIndex;
 
   /**
-   * Builds a filter for given PolishStemmer.DICTIONARY enum.
-   * 
+   * Creates a new MorfologikFilter.
    * @param in   input token stream
-   * @param dict PolishStemmer.DICTIONARY enum
    * @param version Lucene version compatibility for lowercasing.
    */
-  public MorfologikFilter(final TokenStream in, final DICTIONARY dict, final Version version) {
+  public MorfologikFilter(final TokenStream in, final Version version) {
     super(in);
     this.input = in;
     
@@ -73,7 +73,7 @@
     ClassLoader cl = me.getContextClassLoader();
     try {
       me.setContextClassLoader(PolishStemmer.class.getClassLoader());
-      this.stemmer = new PolishStemmer(dict);
+      this.stemmer = new PolishStemmer();
       this.charUtils = CharacterUtils.getInstance(version);
       this.lemmaList = Collections.emptyList();
     } finally {
@@ -81,29 +81,57 @@
     }  
   }
 
+  /**
+   * The tag encoding format has changed across Morfologik versions.
+   * Keep both variants and determine which one to run
+   * based on this flag.
+   */
+  private final static boolean multipleTagsPerLemma = true;
+
   private void popNextLemma() {
-    // Collect all tags for the next unique lemma.
-    CharSequence currentStem;
-    int tags = 0;
-    do {
+    if (multipleTagsPerLemma) {
+      // One tag (concatenated) per lemma.
       final WordData lemma = lemmaList.get(lemmaListIndex++);
-      currentStem = lemma.getStem();
-      final CharSequence tag = lemma.getTag();
+      termAtt.setEmpty().append(lemma.getStem());
+      CharSequence tag = lemma.getTag();
       if (tag != null) {
-        if (tagsList.size() <= tags) {
-          tagsList.add(new StringBuilder());
+        String[] tags = tag.toString().split("\\+|\\|");
+        for (int i = 0; i < tags.length; i++) {
+          if (tagsList.size() <= i) {
+            tagsList.add(new StringBuilder());
+          }
+          StringBuilder buffer = tagsList.get(i);
+          buffer.setLength(0);
+          buffer.append(tags[i]);
         }
-
-        final StringBuilder buffer = tagsList.get(tags++);  
-        buffer.setLength(0);
-        buffer.append(lemma.getTag());
+        tagsAtt.setTags(tagsList.subList(0, tags.length));
+      } else {
+        tagsAtt.setTags(Collections.<StringBuilder> emptyList());
       }
-    } while (lemmaListIndex < lemmaList.size() &&
-             equalCharSequences(lemmaList.get(lemmaListIndex).getStem(), currentStem));
+    } else {
+      // One tag (concatenated) per stem (lemma repeated).
+      CharSequence currentStem;
+      int tags = 0;
+      do {
+        final WordData lemma = lemmaList.get(lemmaListIndex++);
+        currentStem = lemma.getStem();
+        final CharSequence tag = lemma.getTag();
+        if (tag != null) {
+          if (tagsList.size() <= tags) {
+            tagsList.add(new StringBuilder());
+          }
+  
+          final StringBuilder buffer = tagsList.get(tags++);  
+          buffer.setLength(0);
+          buffer.append(lemma.getTag());
+        }
+      } while (lemmaListIndex < lemmaList.size() &&
+               equalCharSequences(lemmaList.get(lemmaListIndex).getStem(), currentStem));
 
-    // Set the lemma's base form and tags as attributes.
-    termAtt.setEmpty().append(currentStem);
-    tagsAtt.setTags(tagsList.subList(0, tags));
+      // Set the lemma's base form and tags as attributes.
+      termAtt.setEmpty().append(currentStem);
+      tagsAtt.setTags(tagsList.subList(0, tags));
+    }
   }
 
   /**
@@ -140,7 +168,8 @@
       popNextLemma();
       return true;
     } else if (this.input.incrementToken()) {
-      if (lookupSurfaceForm(termAtt) || lookupSurfaceForm(toLowercase(termAtt))) {
+      if (!keywordAttr.isKeyword() && 
+          (lookupSurfaceForm(termAtt) || lookupSurfaceForm(toLowercase(termAtt)))) {
         current = captureState();
         popNextLemma();
       } else {
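
The new branch assumes Morfologik 1.6 emits one concatenated tag per lemma and splits it on '+' or '|'; for example (the tag string is hypothetical):

    String tag = "subst:sg:nom:m3+subst:sg:acc:m3";  // hypothetical concatenated tag
    String[] tags = tag.split("\\+|\\|");
    // -> { "subst:sg:nom:m3", "subst:sg:acc:m3" }, copied into one StringBuilder each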
diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java
index 3abedb2..388a441 100644
--- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java
+++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java
@@ -17,12 +17,8 @@
  * limitations under the License.
  */
 
-import java.util.Arrays;
-import java.util.Locale;
 import java.util.Map;
 
-import morfologik.stemming.PolishStemmer.DICTIONARY;
-
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
@@ -32,39 +28,28 @@
  * &lt;fieldType name="text_polish" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
  *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
- *     &lt;filter class="solr.MorfologikFilterFactory" dictionary="MORFOLOGIK" /&gt;
+ *     &lt;filter class="solr.MorfologikFilterFactory" /&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  * 
- * <p>Any of Morfologik dictionaries can be used, these are at the moment:
- * <code>MORFOLOGIK</code> (Morfologik's original dictionary),
- * <code>MORFEUSZ</code> (Morfeusz-SIAT),
- * <code>COMBINED</code> (both of the dictionaries above, combined).
- * 
  * @see <a href="http://morfologik.blogspot.com/">Morfologik web site</a>
  */
 public class MorfologikFilterFactory extends TokenFilterFactory {
-  /** Dictionary. */
-  private DICTIONARY dictionary = DICTIONARY.MORFOLOGIK;
-  
   /** Schema attribute. */
+  @Deprecated
   public static final String DICTIONARY_SCHEMA_ATTRIBUTE = "dictionary";
-  
+
   /** Creates a new MorfologikFilterFactory */
   public MorfologikFilterFactory(Map<String,String> args) {
     super(args);
+
+    // Be specific about the no-longer-supported dictionary attribute.
     String dictionaryName = get(args, DICTIONARY_SCHEMA_ATTRIBUTE);
     if (dictionaryName != null && !dictionaryName.isEmpty()) {
-      try {
-        DICTIONARY dictionary = DICTIONARY.valueOf(dictionaryName.toUpperCase(Locale.ROOT));
-        assert dictionary != null;
-        this.dictionary = dictionary;
-      } catch (IllegalArgumentException e) {
-        throw new IllegalArgumentException("The " + DICTIONARY_SCHEMA_ATTRIBUTE + " attribute accepts the "
-            + "following constants: " + Arrays.toString(DICTIONARY.values()) + ", this value is invalid: "  
-            + dictionaryName);
-      }
+      throw new IllegalArgumentException("The " + DICTIONARY_SCHEMA_ATTRIBUTE + " attribute is no "
+          + "longer supported (Morfologik has one dictionary): " + dictionaryName);
     }
+
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -72,6 +57,6 @@
 
   @Override
   public TokenStream create(TokenStream ts) {
-    return new MorfologikFilter(ts, dictionary, luceneMatchVersion);
+    return new MorfologikFilter(ts, luceneMatchVersion);
   }
 }
diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttribute.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttribute.java
index 2951488..117be78 100644
--- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttribute.java
+++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttribute.java
@@ -23,9 +23,9 @@
 import org.apache.lucene.util.Attribute;
 
 /** 
- * Morfologik dictionaries provide morphosyntactic annotations for
+ * Morfologik provides morphosyntactic annotations for
  * surface forms. For the exact format and description of these,
- * see the project's documentation (annotations vary by dictionary!).
+ * see the project's documentation.
  */
 public interface MorphosyntacticTagsAttribute extends Attribute {
   /** 
@@ -36,7 +36,9 @@
   public void setTags(List<StringBuilder> tags);
 
   /** 
-   * Returns the POS tag of the term.
+   * Returns the POS tags of the term. A single word may have multiple POS tags,
+   * depending on the interpretation (context disambiguation is typically needed
+   * to determine which particular tag is appropriate).  
    */
   public List<StringBuilder> getTags();
 
diff --git a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
index a8c16d3..7490caa 100644
--- a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
+++ b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
@@ -18,11 +18,20 @@
  */
 
 import java.io.IOException;
+import java.io.Reader;
 import java.io.StringReader;
 import java.util.TreeSet;
 
-import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
+import org.apache.lucene.analysis.standard.StandardFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.Version;
 
 /**
  * TODO: The tests below rely on the order of returned lemmas, which is probably not good. 
@@ -56,10 +65,22 @@
     assertAnalyzesToReuse(
         a,
         "T. Gl\u00FCcksberg",
-        new String[] { "to", "tom", "tona", "Gl\u00FCcksberg" },
-        new int[] { 0, 0, 0, 3  },
-        new int[] { 1, 1, 1, 13 },
-        new int[] { 1, 0, 0, 1  });
+        new String[] { "tom", "tona", "Gl\u00FCcksberg" },
+        new int[] { 0, 0, 3  },
+        new int[] { 1, 1, 13 },
+        new int[] { 1, 0, 1  });
+  }
+
+  @SuppressWarnings("unused")
+  private void dumpTokens(String input) throws IOException {
+    TokenStream ts = getTestAnalyzer().tokenStream("dummy", new StringReader(input));
+    ts.reset();
+
+    MorphosyntacticTagsAttribute attribute = ts.getAttribute(MorphosyntacticTagsAttribute.class);
+    CharTermAttribute charTerm = ts.getAttribute(CharTermAttribute.class);
+    while (ts.incrementToken()) {
+      System.out.println(charTerm.toString() + " => " + attribute.getTags());
+    }
   }
 
   /** Test reuse of MorfologikFilter with leftover stems. */
@@ -144,6 +165,34 @@
     ts.close();
   }
 
+  /** Tests that tokens marked via KeywordAttribute are passed through without lemmatization. */
+  public final void testKeywordAttrTokens() throws IOException {
+    final Version version = TEST_VERSION_CURRENT;
+
+    Analyzer a = new MorfologikAnalyzer(version) {
+      @Override
+      protected TokenStreamComponents createComponents(String field, Reader reader) {
+        final CharArraySet keywords = new CharArraySet(version, 1, false);
+        keywords.add("liście");
+
+        final Tokenizer src = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
+        TokenStream result = new StandardFilter(TEST_VERSION_CURRENT, src);
+        result = new SetKeywordMarkerFilter(result, keywords);
+        result = new MorfologikFilter(result, TEST_VERSION_CURRENT); 
+
+        return new TokenStreamComponents(src, result);
+      }
+    };
+
+    assertAnalyzesToReuse(
+      a,
+      "liście danych",
+      new String[] { "liście", "dany", "dana", "dane", "dać" },
+      new int[] { 0, 7, 7, 7, 7 },
+      new int[] { 6, 13, 13, 13, 13 },
+      new int[] { 1, 1, 0, 0, 0 });
+  }
+
   /** blast some random strings through the analyzer */
   public void testRandom() throws Exception {
     checkRandomData(random(), getTestAnalyzer(), 1000 * RANDOM_MULTIPLIER); 
diff --git a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java
index 9adc5a5..50085a9 100644
--- a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java
+++ b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java
@@ -18,8 +18,8 @@
  */
 
 import java.io.StringReader;
+import java.util.Collections;
 import java.util.HashMap;
-import java.util.Map;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
@@ -31,10 +31,7 @@
 public class TestMorfologikFilterFactory extends BaseTokenStreamTestCase {
   public void testCreateDictionary() throws Exception {
     StringReader reader = new StringReader("rowery bilety");
-    Map<String,String> initParams = new HashMap<String,String>();
-    initParams.put(MorfologikFilterFactory.DICTIONARY_SCHEMA_ATTRIBUTE,
-        "morfologik");
-    MorfologikFilterFactory factory = new MorfologikFilterFactory(initParams);
+    MorfologikFilterFactory factory = new MorfologikFilterFactory(Collections.<String,String>emptyMap());
     TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
     stream = factory.create(stream);
     assertTokenStreamContents(stream, new String[] {"rower", "bilet"});
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
index 7f8c994..24087aa 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java
@@ -119,14 +119,9 @@
       
       if (mergeScheduler.equals("org.apache.lucene.index.ConcurrentMergeScheduler")) {
         ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) iwConf.getMergeScheduler();
-        int v = config.get("concurrent.merge.scheduler.max.thread.count", -1);
-        if (v != -1) {
-          cms.setMaxThreadCount(v);
-        }
-        v = config.get("concurrent.merge.scheduler.max.merge.count", -1);
-        if (v != -1) {
-          cms.setMaxMergeCount(v);
-        }
+        int maxThreadCount = config.get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.DEFAULT_MAX_THREAD_COUNT);
+        int maxMergeCount = config.get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.DEFAULT_MAX_MERGE_COUNT);
+        cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
       }
     }
 
@@ -151,13 +146,10 @@
       } catch (Exception e) {
         throw new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy", e);
       }
+      iwConf.getMergePolicy().setNoCFSRatio(isCompound ? 1.0 : 0.0);
       if(iwConf.getMergePolicy() instanceof LogMergePolicy) {
         LogMergePolicy logMergePolicy = (LogMergePolicy) iwConf.getMergePolicy();
-        logMergePolicy.setUseCompoundFile(isCompound);
         logMergePolicy.setMergeFactor(config.get("merge.factor",OpenIndexTask.DEFAULT_MERGE_PFACTOR));
-      } else if(iwConf.getMergePolicy() instanceof TieredMergePolicy) {
-        TieredMergePolicy tieredMergePolicy = (TieredMergePolicy) iwConf.getMergePolicy();
-        tieredMergePolicy.setUseCompoundFile(isCompound);
       }
     }
     final double ramBuffer = config.get("ram.flush.mb",OpenIndexTask.DEFAULT_RAM_FLUSH_MB);
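
setNoCFSRatio replaces the per-policy setUseCompoundFile(boolean) and lives on MergePolicy itself: 1.0 always allows the compound format, 0.0 never does, and intermediate values gate it by a merged segment's relative size. In isolation (the analyzer and flag are hypothetical):

    // Assumed imports: org.apache.lucene.index.IndexWriterConfig, org.apache.lucene.util.Version.
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_44, analyzer);
    // 1.0 = every merged segment may be written as a compound file, 0.0 = none.
    iwc.getMergePolicy().setNoCFSRatio(useCompound ? 1.0 : 0.0);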
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index 5f0eae9..6b11ffd 100755
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -49,6 +49,7 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.index.LogMergePolicy;
+import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SerialMergeScheduler;
@@ -754,7 +755,7 @@
     assertEquals(2, writer.getConfig().getMaxBufferedDocs());
     assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, (int) writer.getConfig().getRAMBufferSizeMB());
     assertEquals(3, ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor());
-    assertFalse(((LogMergePolicy) writer.getConfig().getMergePolicy()).getUseCompoundFile());
+    assertEquals(0.0d, writer.getConfig().getMergePolicy().getNoCFSRatio(), 0.0);
     writer.close();
     Directory dir = benchmark.getRunData().getDirectory();
     IndexReader reader = DirectoryReader.open(dir);
diff --git a/lucene/build.xml b/lucene/build.xml
index fc73ced..b02ec6e 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -183,7 +183,10 @@
     <forbidden-apis internalRuntimeForbidden="true" classpathref="forbidden-apis.classpath">
       <bundledSignatures name="jdk-unsafe-${javac.target}"/>
       <bundledSignatures name="jdk-deprecated-${javac.target}"/>
-      <signaturesFileSet file="${common.dir}/tools/forbiddenApis/executors.txt"/>
+      <signaturesFileSet dir="${common.dir}/tools/forbiddenApis">
+        <include name="executors.txt" />
+        <include name="chars.txt" />
+      </signaturesFileSet>
       <fileset dir="${basedir}/build" includes="**/*.class" />
     </forbidden-apis>
   </target>
@@ -345,7 +348,7 @@
   </target>
 
   <!-- rat-sources-typedef is *not* a useless dependency. do not remove -->
-  <target name="rat-sources" depends="rat-sources-typedef">
+  <target name="rat-sources" depends="rat-sources-typedef,common.rat-sources">
     <subant target="rat-sources" failonerror="true" inheritall="false">
       <propertyset refid="uptodate.and.compiled.properties"/>
       <fileset dir="core" includes="build.xml"/>
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java
index 532c9e6..6975f26 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java
@@ -44,7 +44,7 @@
  * @lucene.experimental */
 public class VariableGapTermsIndexReader extends TermsIndexReaderBase {
 
-  private final PositiveIntOutputs fstOutputs = PositiveIntOutputs.getSingleton(true);
+  private final PositiveIntOutputs fstOutputs = PositiveIntOutputs.getSingleton();
   private int indexDivisor;
 
   // Closed if indexLoaded is true:
@@ -199,7 +199,7 @@
         if (indexDivisor > 1) {
           // subsample
           final IntsRef scratchIntsRef = new IntsRef();
-          final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+          final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
           final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
           final BytesRefFSTEnum<Long> fstEnum = new BytesRefFSTEnum<Long>(fst);
           BytesRefFSTEnum.InputOutput<Long> result;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java
index 65a0b7e..6d3f6ba 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java
@@ -235,7 +235,7 @@
 
     public FSTFieldWriter(FieldInfo fieldInfo, long termsFilePointer) throws IOException {
       this.fieldInfo = fieldInfo;
-      fstOutputs = PositiveIntOutputs.getSingleton(true);
+      fstOutputs = PositiveIntOutputs.getSingleton();
       fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, fstOutputs);
       indexStart = out.getFilePointer();
       ////System.out.println("VGW: field=" + fieldInfo.name);
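
PositiveIntOutputs.getSingleton() no longer takes the doShare flag; output sharing is now always on. A small FST built against the new signature, sketched on the 4.x FST API (keys must be added in sorted order):

    // Assumed imports: org.apache.lucene.util.BytesRef, org.apache.lucene.util.IntsRef,
    // and from org.apache.lucene.util.fst: Builder, FST, PositiveIntOutputs, Util.
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
    IntsRef scratch = new IntsRef();
    builder.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L);
    builder.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
    FST<Long> fst = builder.finish();             // throws IOException
    Long v = Util.get(fst, new BytesRef("cat"));  // 5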
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index ff940e8..d576d3c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -513,7 +513,7 @@
     }
 
     private void loadTerms() throws IOException {
-      PositiveIntOutputs posIntOutputs = PositiveIntOutputs.getSingleton(false);
+      PositiveIntOutputs posIntOutputs = PositiveIntOutputs.getSingleton();
       final Builder<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>> b;
       final PairOutputs<Long,Long> outputsInner = new PairOutputs<Long,Long>(posIntOutputs, posIntOutputs);
       final PairOutputs<Long,PairOutputs.Pair<Long,Long>> outputs = new PairOutputs<Long,PairOutputs.Pair<Long,Long>>(posIntOutputs,
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 9a9d525..cb5d2a5 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -186,6 +186,7 @@
   <property name="build.encoding" value="utf-8"/>
 
   <property name="src.dir" location="src/java"/>
+  <property name="resources.dir" location="${src.dir}/../resources"/>
   <property name="tests.src.dir" location="src/test"/>
   <available property="module.has.tests" type="dir" file="${tests.src.dir}"/>
   <property name="build.dir" location="build"/>
@@ -255,6 +256,10 @@
 
   <!-- a reasonable default exclusion set, can be overridden for special cases -->
   <property name="rat.excludes" value="**/TODO,**/*.txt,**/*.iml"/>
+  
+  <!-- These patterns can be defined to add additional files for checks, relative to module's home dir -->
+  <property name="rat.additional-includes" value=""/>
+  <property name="rat.additional-excludes" value=""/>
 
   <propertyset id="uptodate.and.compiled.properties" dynamic="true">
     <propertyref regex=".*\.uptodate$$"/>
@@ -351,7 +356,7 @@
   <target name="resolve" depends="ivy-availability-check,ivy-configure">
     <!-- todo, make this a property or something. 
          only special cases need bundles -->
-    <ivy:retrieve type="jar,bundle" log="download-only" 
+    <ivy:retrieve type="jar,bundle,tests" log="download-only" 
                   conf="${ivy.default.configuration}" sync="${ivy.sync}"/>
   </target>
 
@@ -489,7 +494,7 @@
 
     <!-- Copy the resources folder (if existent) -->
     <copy todir="${build.dir}/classes/java">
-      <fileset dir="${src.dir}/../resources" erroronmissingdir="no"/>
+      <fileset dir="${resources.dir}" erroronmissingdir="no"/>
     </copy>
   </target>
 
@@ -864,6 +869,7 @@
         <mkdir dir="${tests.cachedir}/${name}" />
 
         <junit4:junit4
+            taskName="junit4"
             dir="@{workDir}"
             tempdir="@{workDir}/temp"
             maxmemory="${tests.heapsize}" 
@@ -1339,7 +1345,7 @@
     <sequential>
       <mkdir dir="${build.dir}" />
       <jarify basedir="${src.dir}" destfile="${build.dir}/${final.name}-src.jar">
-        <fileset dir="${src.dir}/../resources" erroronmissingdir="no"/>
+        <fileset dir="${resources.dir}" erroronmissingdir="no"/>
       </jarify>
     </sequential>
   </target>
@@ -1454,7 +1460,7 @@
 
   <target name="filter-pom-templates" unless="filtered.pom.templates.uptodate">
     <mkdir dir="${filtered.pom.templates.dir}"/>
-    <copy todir="${common.dir}/build/poms" overwrite="true">
+    <copy todir="${common.dir}/build/poms" overwrite="true" encoding="UTF-8">
       <fileset dir="${common.dir}/../dev-tools/maven"/>
       <filterset begintoken="@" endtoken="@">
         <filter token="version" value="${version}"/>
@@ -1508,28 +1514,34 @@
   </target>
 
   <target name="rat-sources-typedef" unless="rat.loaded">
-    <ivy:cachepath organisation="org.apache.rat" module="apache-rat" revision="0.8" transitive="false" inline="true" conf="master" type="jar" pathid="rat.classpath"/>
+    <ivy:cachepath organisation="org.apache.rat" module="apache-rat" revision="0.9" transitive="false" inline="true" conf="master" type="jar" pathid="rat.classpath"/>
     <typedef resource="org/apache/rat/anttasks/antlib.xml" uri="antlib:org.apache.rat.anttasks" classpathref="rat.classpath"/>
     <property name="rat.loaded" value="true"/>
   </target>
 
   <target name="rat-sources" depends="rat-sources-typedef"
 	  description="runs the tasks over source and test files">
-    <sequential>
     <!-- create a temp file for the log to go to -->
     <tempfile property="rat.sources.logfile"
               prefix="rat"
               destdir="${java.io.tmpdir}"/>
     <!-- run rat, going to the file -->
     <rat:report xmlns:rat="antlib:org.apache.rat.anttasks" 
-                reportFile="${rat.sources.logfile}">
-      <fileset dir="${src.dir}" excludes="${rat.excludes}"/>
+                reportFile="${rat.sources.logfile}" addDefaultLicenseMatchers="true">
+      <fileset dir="." includes="*.xml ${rat.additional-includes}" excludes="${rat.additional-excludes}"/>
+      <fileset dir="${src.dir}" excludes="${rat.excludes}" erroronmissingdir="false"/>
       <fileset dir="${tests.src.dir}" excludes="${rat.excludes}" erroronmissingdir="false"/>
-      <!-- some modules have a src/tools/[java,test] -->
-      <fileset dir="src/tools/java" excludes="${rat.excludes}" erroronmissingdir="false"/>
-      <fileset dir="src/tools/test" excludes="${rat.excludes}" erroronmissingdir="false"/>
+
+      <!-- TODO: Check all resource files. Currently some stopword and similar files have no license header! -->
+      <fileset dir="${resources.dir}" includes="META-INF/**" erroronmissingdir="false"/>
       
-      <!-- bsd-like stuff -->
+      <!-- BSD 4-clause stuff (is disallowed below) -->
+      <rat:substringMatcher licenseFamilyCategory="BSD4 "
+             licenseFamilyName="Original BSD License (with advertising clause)">
+        <pattern substring="All advertising materials"/>
+      </rat:substringMatcher>
+
+      <!-- BSD-like stuff -->
       <rat:substringMatcher licenseFamilyCategory="BSD  "
              licenseFamilyName="Modified BSD License">
       <!-- brics automaton -->
@@ -1542,16 +1554,20 @@
         <pattern substring="Egothor Software License version 1.00"/>
       <!-- JaSpell -->
         <pattern substring="Copyright (c) 2005 Bruno Martins"/>
+      <!-- d3.js -->
+        <pattern substring="THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS"/>
+      <!-- highlight.js -->
+        <pattern substring="THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS"/>
       </rat:substringMatcher>
 
-      <!-- mit-like -->
+      <!-- MIT-like -->
       <rat:substringMatcher licenseFamilyCategory="MIT  "
              licenseFamilyName="The MIT License">
       <!-- ICU license -->
         <pattern substring="Permission is hereby granted, free of charge, to any person obtaining a copy"/>
       </rat:substringMatcher>
 
-      <!-- apache -->
+      <!-- Apache -->
       <rat:substringMatcher licenseFamilyCategory="AL   "
              licenseFamilyName="Apache">
         <pattern substring="Licensed to the Apache Software Foundation (ASF) under"/>
@@ -1577,13 +1593,13 @@
     </rat:report>
     <!-- now print the output, for review -->
     <loadfile property="rat.output" srcFile="${rat.sources.logfile}"/>
-    <echo>${rat.output}</echo>
+    <echo taskname="rat">${rat.output}</echo>
     <delete>
       <fileset file="${rat.sources.logfile}">
         <and>
           <containsregexp expression="^0 Unknown Licenses"/>
           <not>
-            <containsregexp expression="^\s+!AL"/>
+            <containsregexp expression="^\s+!"/>
           </not>
         </and>
       </fileset>
@@ -1594,7 +1610,6 @@
         <available file="${rat.sources.logfile}"/>
       </condition>
     </fail>
-    </sequential>
   </target>
 
   <!--+
@@ -1814,10 +1829,71 @@
         </condition>
       </fail>
 
-
+      <patch-javadoc dir="@{destdir}" docencoding="${javadoc.charset}"/>
    </sequential>
   </macrodef>
 
+  <!--
+    Patch frame injection bugs in javadoc generated files - see CVE-2013-1571, http://www.kb.cert.org/vuls/id/225657
+    
+    Feel free to use this macro in your own Ant build file. This macro works together with Ant's javadoc task
+    and should be invoked directly after that task executes to patch broken javadocs, e.g.:
+      <patch-javadoc dir="..." docencoding="UTF-8"/>
+    Please make sure that the docencoding parameter uses the same charset as javadoc's docencoding. The default
+    is the platform default encoding (like the javadoc task).
+    The specified dir is the destination directory of the javadoc task.
+  -->
+  <macrodef name="patch-javadoc">
+    <attribute name="dir"/>
+    <attribute name="docencoding" default="${file.encoding}"/>
+    <sequential>
+      <replace encoding="@{docencoding}" summary="true" taskname="patch-javadoc">
+        <fileset dir="@{dir}" casesensitive="false" includes="**/index.html,**/index.htm,**/toc.html,**/toc.htm">
+          <!-- TODO: add encoding="@{docencoding}" to contains check, when we are on ANT 1.9.0: -->
+          <not><contains text="function validURL(url) {" casesensitive="true" /></not>
+        </fileset>
+        <replacetoken><![CDATA[function loadFrames() {]]></replacetoken>
+        <replacevalue expandProperties="false"><![CDATA[if (targetPage != "" && !validURL(targetPage))
+        targetPage = "undefined";
+    function validURL(url) {
+        var pos = url.indexOf(".html");
+        if (pos == -1 || pos != url.length - 5)
+            return false;
+        var allowNumber = false;
+        var allowSep = false;
+        var seenDot = false;
+        for (var i = 0; i < url.length - 5; i++) {
+            var ch = url.charAt(i);
+            if ('a' <= ch && ch <= 'z' ||
+                    'A' <= ch && ch <= 'Z' ||
+                    ch == '$' ||
+                    ch == '_') {
+                allowNumber = true;
+                allowSep = true;
+            } else if ('0' <= ch && ch <= '9'
+                    || ch == '-') {
+                if (!allowNumber)
+                     return false;
+            } else if (ch == '/' || ch == '.') {
+                if (!allowSep)
+                    return false;
+                allowNumber = false;
+                allowSep = false;
+                if (ch == '.')
+                     seenDot = true;
+                if (ch == '/' && seenDot)
+                     return false;
+            } else {
+                return false;
+            }
+        }
+        return true;
+    }
+    function loadFrames() {]]></replacevalue>
+      </replace>
+    </sequential>
+  </macrodef>
+
   <macrodef name="modules-crawl">
     <attribute name="target" default=""/>
     <attribute name="failonerror" default="true"/>
@@ -2030,7 +2106,7 @@
     <element name="nested" optional="false" implicit="true"/>
     <sequential>
       <copy todir="@{todir}" flatten="@{flatten}" overwrite="@{overwrite}" verbose="true"
-        preservelastmodified="false" encoding="UTF-8" outputencoding="UTF-8" taskname="pegdown"
+        preservelastmodified="false" encoding="UTF-8" taskname="pegdown"
       >
         <filterchain>
           <tokenfilter>
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
index 9f3d93a..04ec3b7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
@@ -50,10 +50,6 @@
     totalTermFreq = other.totalTermFreq;
     termBlockOrd = other.termBlockOrd;
     blockFilePointer = other.blockFilePointer;
-
-    // NOTE: don't copy blockTermCount;
-    // it's "transient": used only by the "primary"
-    // termState, and regenerated on seek by TermState
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
index 00cc4f4..c73942f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -25,16 +24,13 @@
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
  * Random-access reader for {@link CompressingStoredFieldsIndexWriter}.
  * @lucene.internal
  */
-public final class CompressingStoredFieldsIndexReader implements Closeable, Cloneable {
-
-  final IndexInput fieldsIndexIn;
+public final class CompressingStoredFieldsIndexReader implements Cloneable {
 
   static long moveLowOrderBitToSign(long n) {
     return ((n >>> 1) ^ -(n & 1));
@@ -48,8 +44,9 @@
   final PackedInts.Reader[] docBasesDeltas; // delta from the avg
   final PackedInts.Reader[] startPointersDeltas; // delta from the avg
 
+  // It is the responsibility of the caller to close fieldsIndexIn after this constructor
+  // has been called
   CompressingStoredFieldsIndexReader(IndexInput fieldsIndexIn, SegmentInfo si) throws IOException {
-    this.fieldsIndexIn = fieldsIndexIn;
     maxDoc = si.getDocCount();
     int[] docBases = new int[16];
     long[] startPointers = new long[16];
@@ -106,17 +103,6 @@
     this.startPointersDeltas = Arrays.copyOf(startPointersDeltas, blockCount);
   }
 
-  private CompressingStoredFieldsIndexReader(CompressingStoredFieldsIndexReader other) {
-    this.fieldsIndexIn = null;
-    this.maxDoc = other.maxDoc;
-    this.docBases = other.docBases;
-    this.startPointers = other.startPointers;
-    this.avgChunkDocs = other.avgChunkDocs;
-    this.avgChunkSizes = other.avgChunkSizes;
-    this.docBasesDeltas = other.docBasesDeltas;
-    this.startPointersDeltas = other.startPointersDeltas;
-  }
-
   private int block(int docID) {
     int lo = 0, hi = docBases.length - 1;
     while (lo <= hi) {
@@ -172,16 +158,7 @@
 
   @Override
   public CompressingStoredFieldsIndexReader clone() {
-    if (fieldsIndexIn == null) {
-      return this;
-    } else {
-      return new CompressingStoredFieldsIndexReader(this);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    IOUtils.close(fieldsIndexIn);
+    return this;
   }
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
index 287efe7..29fed88 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
@@ -60,6 +60,9 @@
  */
 public final class CompressingStoredFieldsReader extends StoredFieldsReader {
 
+  // Do not reuse the decompression buffer when there is more than 32 KB to decompress
+  private static final int BUFFER_REUSE_THRESHOLD = 1 << 15;
+
   private final FieldInfos fieldInfos;
   private final CompressingStoredFieldsIndexReader indexReader;
   private final IndexInput fieldsStream;
@@ -93,19 +96,22 @@
     numDocs = si.getDocCount();
     IndexInput indexStream = null;
     try {
-      fieldsStream = d.openInput(IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
+      // Load the index into memory
       final String indexStreamFN = IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION);
       indexStream = d.openInput(indexStreamFN, context);
-
       final String codecNameIdx = formatName + CODEC_SFX_IDX;
-      final String codecNameDat = formatName + CODEC_SFX_DAT;
       CodecUtil.checkHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
+      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
+      indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
+      indexStream.close();
+      indexStream = null;
+
+      // Open the data file and read metadata
+      final String fieldsStreamFN = IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION);
+      fieldsStream = d.openInput(fieldsStreamFN, context);
+      final String codecNameDat = formatName + CODEC_SFX_DAT;
       CodecUtil.checkHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
       assert CodecUtil.headerLength(codecNameDat) == fieldsStream.getFilePointer();
-      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
-
-      indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
-      indexStream = null;
 
       packedIntsVersion = fieldsStream.readVInt();
       decompressor = compressionMode.newDecompressor();
@@ -134,7 +140,7 @@
   @Override
   public void close() throws IOException {
     if (!closed) {
-      IOUtils.close(fieldsStream, indexReader);
+      IOUtils.close(fieldsStream);
       closed = true;
     }
   }
@@ -255,6 +261,7 @@
       return;
     }
 
+    final BytesRef bytes = totalLength <= BUFFER_REUSE_THRESHOLD ? this.bytes : new BytesRef();
     decompressor.decompress(fieldsStream, totalLength, offset, length, bytes);
     assert bytes.length == length;
 
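
The reuse heuristic keeps one shared scratch buffer for typical documents but hands anything over 32 KB a throwaway buffer, so a single oversized document cannot permanently grow the reader's footprint. The same pattern in isolation (names hypothetical):

    private static final int BUFFER_REUSE_THRESHOLD = 1 << 15;  // 32 KB

    // Small payloads reuse (and may grow) the shared scratch; large payloads
    // get a temporary BytesRef that becomes garbage right after use.
    BytesRef scratchFor(int totalLength, BytesRef shared) {
      return totalLength <= BUFFER_REUSE_THRESHOLD ? shared : new BytesRef();
    }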
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index 545c86ca..48c8d5c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -102,19 +102,22 @@
     numDocs = si.getDocCount();
     IndexInput indexStream = null;
     try {
-      vectorsStream = d.openInput(IndexFileNames.segmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
+      // Load the index into memory
       final String indexStreamFN = IndexFileNames.segmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION);
       indexStream = d.openInput(indexStreamFN, context);
-
       final String codecNameIdx = formatName + CODEC_SFX_IDX;
-      final String codecNameDat = formatName + CODEC_SFX_DAT;
       CodecUtil.checkHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
+      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
+      indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
+      indexStream.close();
+      indexStream = null;
+
+      // Open the data file and read metadata
+      final String vectorsStreamFN = IndexFileNames.segmentFileName(segment, segmentSuffix, VECTORS_EXTENSION);
+      vectorsStream = d.openInput(vectorsStreamFN, context);
+      final String codecNameDat = formatName + CODEC_SFX_DAT;
       CodecUtil.checkHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
       assert CodecUtil.headerLength(codecNameDat) == vectorsStream.getFilePointer();
-      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
-
-      indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
-      indexStream = null;
 
       packedIntsVersion = vectorsStream.readVInt();
       chunkSize = vectorsStream.readVInt();
@@ -161,7 +164,7 @@
   @Override
   public void close() throws IOException {
     if (!closed) {
-      IOUtils.close(vectorsStream, indexReader);
+      IOUtils.close(vectorsStream);
       closed = true;
     }
   }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java
index 0386369..938d4c0 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java
@@ -242,8 +242,8 @@
       if (payloads) {
         tvf.writeBytes(payloadData.bytes, payloadData.offset, payloadData.length);
       }
-      for (int i = 0; i < bufferedIndex; i++) {
-        if (offsets) {
+      if (offsets) {
+        for (int i = 0; i < bufferedIndex; i++) {
           tvf.writeVInt(offsetStartBuffer[i] - lastOffset);
           tvf.writeVInt(offsetEndBuffer[i] - offsetStartBuffer[i]);
           lastOffset = offsetEndBuffer[i];
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/package.html b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/package.html
index d20037b..5187359 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/package.html
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/package.html
@@ -372,13 +372,7 @@
 <a name="Limitations" id="Limitations"></a>
 <h2>Limitations</h2>
 <div>
-<p>When referring to term numbers, Lucene's current implementation uses a Java
-<code>int</code> to hold the term index, which means the
-maximum number of unique terms in any single index segment is ~2.1 billion
-times the term index interval (default 128) = ~274 billion. This is technically
-not a limitation of the index file format, just of Lucene's current
-implementation.</p>
-<p>Similarly, Lucene uses a Java <code>int</code> to refer to
+<p>Lucene uses a Java <code>int</code> to refer to
 document numbers, and the index file format uses an <code>Int32</code>
 on-disk to store document numbers. This is a limitation
 of both the index file format and the current implementation. Eventually these
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsFormat.java
index 4838f7a..df7fe6f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/Lucene41PostingsFormat.java
@@ -161,7 +161,7 @@
  *    <li>SkipFPDelta determines the position of this term's SkipData within the .doc
  *        file. In particular, it is the length of the TermFreq data.
  *        SkipDelta is only stored if DocFreq is not smaller than SkipMinimum
- *        (i.e. 8 in Lucene41PostingsFormat).</li>
+ *        (i.e. 128 in Lucene41PostingsFormat).</li>
  *    <li>SingletonDocID is an optimization when a term only appears in one document. In this case, instead
  *        of writing a file pointer to the .doc file (DocFPDelta), and then a VIntBlock at that location, the 
  *        single document ID is written to the term dictionary.</li>
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/package.html b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/package.html
index 3df0293..d429cb0 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene41/package.html
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene41/package.html
@@ -381,13 +381,7 @@
 <a name="Limitations" id="Limitations"></a>
 <h2>Limitations</h2>
 <div>
-<p>When referring to term numbers, Lucene's current implementation uses a Java
-<code>int</code> to hold the term index, which means the
-maximum number of unique terms in any single index segment is ~2.1 billion
-times the term index interval (default 128) = ~274 billion. This is technically
-not a limitation of the index file format, just of Lucene's current
-implementation.</p>
-<p>Similarly, Lucene uses a Java <code>int</code> to refer to
+<p>Lucene uses a Java <code>int</code> to refer to
 document numbers, and the index file format uses an <code>Int32</code>
 on-disk to store document numbers. This is a limitation
 of both the index file format and the current implementation. Eventually these
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java
index aced6ce..a1f6dc4 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java
@@ -245,7 +245,7 @@
     meta.writeVInt(field.number);
     meta.writeByte(FST);
     meta.writeLong(data.getFilePointer());
-    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     Builder<Long> builder = new Builder<Long>(INPUT_TYPE.BYTE1, outputs);
     IntsRef scratch = new IntsRef();
     long ord = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesFormat.java
index bb384c7..3e81004 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesFormat.java
@@ -120,16 +120,33 @@
  * </ol>
  */
 public final class Lucene42DocValuesFormat extends DocValuesFormat {
-
-  /** Sole constructor */
+  final float acceptableOverheadRatio;
+  
+  /** 
+   * Calls {@link #Lucene42DocValuesFormat(float) 
+   * Lucene42DocValuesFormat(PackedInts.DEFAULT)} 
+   */
   public Lucene42DocValuesFormat() {
+    this(PackedInts.DEFAULT);
+  }
+  
+  /**
+   * Creates a new Lucene42DocValuesFormat with the specified
+   * <code>acceptableOverheadRatio</code> for NumericDocValues.
+   * @param acceptableOverheadRatio compression parameter for numerics. 
+   *        Currently this is only used when the number of unique values is small.
+   *        
+   * @lucene.experimental
+   */
+  public Lucene42DocValuesFormat(float acceptableOverheadRatio) {
     super("Lucene42");
+    this.acceptableOverheadRatio = acceptableOverheadRatio;
   }
 
   @Override
   public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
     // note: we choose DEFAULT here (it's reasonably fast, and for small bpv has tiny waste)
-    return new Lucene42DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION, PackedInts.DEFAULT);
+    return new Lucene42DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION, acceptableOverheadRatio);
   }
   
   @Override
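
The new acceptableOverheadRatio knob is easiest to see in isolation. A minimal sketch, assuming lucene-core from this branch on the classpath; the class name is illustrative, and PackedInts.COMPACT/DEFAULT/FASTEST are pre-existing constants:

    import org.apache.lucene.codecs.DocValuesFormat;
    import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
    import org.apache.lucene.util.packed.PackedInts;

    public class DocValuesOverheadSketch {
      public static void main(String[] args) {
        // Smallest in-memory representation, slowest reads:
        DocValuesFormat compact = new Lucene42DocValuesFormat(PackedInts.COMPACT);
        // The no-arg constructor delegates to PackedInts.DEFAULT, per the patch above:
        DocValuesFormat balanced = new Lucene42DocValuesFormat();
        // Most RAM, fastest reads:
        DocValuesFormat fastest = new Lucene42DocValuesFormat(PackedInts.FASTEST);
        System.out.println(compact + " / " + balanced + " / " + fastest);
      }
    }

To take effect at index time the chosen format still has to be returned from a custom Codec's docValuesFormat(); that wiring is omitted here.
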
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
index 30aad0f..c5ac3f1 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
@@ -278,7 +278,7 @@
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST<Long>(data, PositiveIntOutputs.getSingleton(true));
+        instance = new FST<Long>(data, PositiveIntOutputs.getSingleton());
         fstInstances.put(field.number, instance);
       }
     }
@@ -352,7 +352,7 @@
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST<Long>(data, PositiveIntOutputs.getSingleton(true));
+        instance = new FST<Long>(data, PositiveIntOutputs.getSingleton());
         fstInstances.put(field.number, instance);
       }
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsFormat.java
index 9c07bc3..a7c8c1a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsFormat.java
@@ -42,14 +42,32 @@
  * @see Lucene42DocValuesFormat
  */
 public final class Lucene42NormsFormat extends NormsFormat {
+  final float acceptableOverheadRatio;
 
-  /** Sole constructor */
-  public Lucene42NormsFormat() {}
+  /**
+   * Calls {@link #Lucene42NormsFormat(float)
+   * Lucene42NormsFormat(PackedInts.FASTEST)}
+   */
+  public Lucene42NormsFormat() {
+    // note: we choose FASTEST here (otherwise our norms are half as big, but 15% slower than previous Lucene)
+    this(PackedInts.FASTEST);
+  }
+  
+  /**
+   * Creates a new Lucene42NormsFormat with the specified
+   * <code>acceptableOverheadRatio</code> for NumericDocValues.
+   * @param acceptableOverheadRatio compression parameter for numerics. 
+   *        Currently this is only used when the number of unique values is small.
+   *        
+   * @lucene.experimental
+   */
+  public Lucene42NormsFormat(float acceptableOverheadRatio) {
+    this.acceptableOverheadRatio = acceptableOverheadRatio;
+  }
   
   @Override
   public DocValuesConsumer normsConsumer(SegmentWriteState state) throws IOException {
-    // note: we choose FASTEST here (otherwise our norms are half as big but 15% slower than previous lucene)
-    return new Lucene42DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION, PackedInts.FASTEST);
+    return new Lucene42DocValuesConsumer(state, DATA_CODEC, DATA_EXTENSION, METADATA_CODEC, METADATA_EXTENSION, acceptableOverheadRatio);
   }
   
   @Override
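
The same pattern applies to norms. A hedged sketch (class name illustrative); the no-arg constructor now carries the FASTEST default that was previously buried in normsConsumer():

    import org.apache.lucene.codecs.NormsFormat;
    import org.apache.lucene.codecs.lucene42.Lucene42NormsFormat;
    import org.apache.lucene.util.packed.PackedInts;

    public class NormsOverheadSketch {
      public static void main(String[] args) {
        NormsFormat speed = new Lucene42NormsFormat();                    // PackedInts.FASTEST
        NormsFormat space = new Lucene42NormsFormat(PackedInts.COMPACT);  // smaller norms, slower scoring
        System.out.println(speed + " / " + space);
      }
    }

The ~15% slowdown mentioned in the constructor comment is the patch's own measurement; actual numbers will vary by workload.
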
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/package.html b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/package.html
index 9ed17df..571b766 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/package.html
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/package.html
@@ -384,13 +384,7 @@
 <a name="Limitations" id="Limitations"></a>
 <h2>Limitations</h2>
 <div>
-<p>When referring to term numbers, Lucene's current implementation uses a Java
-<code>int</code> to hold the term index, which means the
-maximum number of unique terms in any single index segment is ~2.1 billion
-times the term index interval (default 128) = ~274 billion. This is technically
-not a limitation of the index file format, just of Lucene's current
-implementation.</p>
-<p>Similarly, Lucene uses a Java <code>int</code> to refer to
+<p>Lucene uses a Java <code>int</code> to refer to
 document numbers, and the index file format uses an <code>Int32</code>
 on-disk to store document numbers. This is a limitation
 of both the index file format and the current implementation. Eventually these
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 0edadfc..142a67c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -463,11 +463,11 @@
 
     if (onlySegments != null) {
       result.partial = true;
-      if (infoStream != null)
+      if (infoStream != null) {
         infoStream.print("\nChecking only these segments:");
-      for (String s : onlySegments) {
-        if (infoStream != null)
+        for (String s : onlySegments) {
           infoStream.print(" " + s);
+        }
       }
       result.segmentsChecked.addAll(onlySegments);
       msg(infoStream, ":");
diff --git a/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index 42212ec..7c015aa 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -30,12 +30,11 @@
  *  separate thread.
  *
  *  <p>Specify the max number of threads that may run at
- *  once with {@link #setMaxThreadCount}.</p>
+ *  once, and the maximum number of simultaneous merges
+ *  with {@link #setMaxMergesAndThreads}.</p>
  *
- *  <p>Separately specify the maximum number of simultaneous
- *  merges with {@link #setMaxMergeCount}.  If the number of
- *  merges exceeds the max number of threads then the
- *  largest merges are paused until one of the smaller
+ *  <p>If the number of merges exceeds the max number of threads 
+ *  then the largest merges are paused until one of the smaller
  *  merges completes.</p>
  *
  *  <p>If more than {@link #getMaxMergeCount} merges are
@@ -49,21 +48,29 @@
 
   /** List of currently active {@link MergeThread}s. */
   protected List<MergeThread> mergeThreads = new ArrayList<MergeThread>();
+  
+  /** 
+   * Default {@code maxThreadCount}.
+   * We default to 1: tests on spinning-magnet drives showed slower
+   * indexing performance if more than one merge thread runs at
+   * once (though on an SSD it was faster)
+   */
+  public static final int DEFAULT_MAX_THREAD_COUNT = 1;
+  
+  /** Default {@code maxMergeCount}. */
+  public static final int DEFAULT_MAX_MERGE_COUNT = 2;
 
   // Max number of merge threads allowed to be running at
   // once.  When there are more merges than this, we
   // forcefully pause the larger ones, letting the smaller
   // ones run, up until maxMergeCount merges at which point
   // we forcefully pause incoming threads (that presumably
-  // are the ones causing so much merging).  We default to 1
-  // here: tests on spinning-magnet drives showed slower
-  // indexing perf if more than one merge thread runs at
-  // once (though on an SSD it was faster):
-  private int maxThreadCount = 1;
+  // are the ones causing so much merging).
+  private int maxThreadCount = DEFAULT_MAX_THREAD_COUNT;
 
   // Max number of merges we accept before forcefully
   // throttling the incoming threads
-  private int maxMergeCount = 2;
+  private int maxMergeCount = DEFAULT_MAX_MERGE_COUNT;
 
   /** {@link Directory} that holds the index. */
   protected Directory dir;
@@ -80,43 +87,40 @@
   public ConcurrentMergeScheduler() {
   }
 
-  /** Sets the max # simultaneous merge threads that should
-   *  be running at once.  This must be <= {@link
-   *  #setMaxMergeCount}. */
-  public void setMaxThreadCount(int count) {
-    if (count < 1) {
-      throw new IllegalArgumentException("count should be at least 1");
+  /**
+   * Sets the maximum number of merge threads and simultaneous merges allowed.
+   * 
+   * @param maxMergeCount the max # simultaneous merges that are allowed.
+   *       If a merge is necessary yet we already have this many
+   *       threads running, the incoming thread (that is calling
+   *       add/updateDocument) will block until a merge thread
+   *       has completed.  Note that we will only run the
+   *       smallest <code>maxThreadCount</code> merges at a time.
+   * @param maxThreadCount the max # simultaneous merge threads that should
+   *       be running at once.  This must be &lt;= <code>maxMergeCount</code>
+   */
+  public void setMaxMergesAndThreads(int maxMergeCount, int maxThreadCount) {
+    if (maxThreadCount < 1) {
+      throw new IllegalArgumentException("maxThreadCount should be at least 1");
     }
-    if (count > maxMergeCount) {
-      throw new IllegalArgumentException("count should be <= maxMergeCount (= " + maxMergeCount + ")");
+    if (maxMergeCount < 1) {
+      throw new IllegalArgumentException("maxMergeCount should be at least 1");
     }
-    maxThreadCount = count;
+    if (maxThreadCount > maxMergeCount) {
+      throw new IllegalArgumentException("maxThreadCount should be <= maxMergeCount (= " + maxMergeCount + ")");
+    }
+    this.maxThreadCount = maxThreadCount;
+    this.maxMergeCount = maxMergeCount;
   }
 
   /** Returns {@code maxThreadCount}.
    *
-   * @see #setMaxThreadCount(int) */
+   * @see #setMaxMergesAndThreads(int, int) */
   public int getMaxThreadCount() {
     return maxThreadCount;
   }
 
-  /** Sets the max # simultaneous merges that are allowed.
-   *  If a merge is necessary yet we already have this many
-   *  threads running, the incoming thread (that is calling
-   *  add/updateDocument) will block until a merge thread
-   *  has completed.  Note that we will only run the
-   *  smallest {@link #setMaxThreadCount} merges at a time. */
-  public void setMaxMergeCount(int count) {
-    if (count < 1) {
-      throw new IllegalArgumentException("count should be at least 1");
-    }
-    if (count < maxThreadCount) {
-      throw new IllegalArgumentException("count should be >= maxThreadCount (= " + maxThreadCount + ")");
-    }
-    maxMergeCount = count;
-  }
-
-  /** See {@link #setMaxMergeCount}. */
+  /** See {@link #setMaxMergesAndThreads}. */
   public int getMaxMergeCount() {
     return maxMergeCount;
   }
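
With the two setters merged, both limits are validated together and can no longer be set in a conflicting order. A small sketch of configuring a writer with the combined setter; Version.LUCENE_42 and StandardAnalyzer are stand-ins for whatever the application already uses:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.ConcurrentMergeScheduler;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.util.Version;

    public class MergeSchedulerSketch {
      public static void main(String[] args) {
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        // Accept up to 4 pending merges, run at most 2 of them concurrently;
        // maxThreadCount > maxMergeCount would throw IllegalArgumentException.
        cms.setMaxMergesAndThreads(4, 2);
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_42,
            new StandardAnalyzer(Version.LUCENE_42));
        conf.setMergeScheduler(cms);
      }
    }
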
diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
index 788b157..e2372a1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -19,6 +19,7 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -291,7 +292,7 @@
           // IOException allowed to throw there, in case
           // segments_N is corrupt
           sis.read(dir, fileName);
-        } catch (FileNotFoundException fnfe) {
+        } catch (FileNotFoundException | NoSuchFileException fnfe) {
           // LUCENE-948: on NFS (and maybe others), if
           // you have writers switching back and forth
           // between machines, it's very likely that the
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
index e6dc129..7242f0d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
@@ -24,5 +24,4 @@
   abstract void finishDocument() throws IOException;
   abstract void flush(final SegmentWriteState state) throws IOException;
   abstract void abort();
-  abstract void doAfterFlush();
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
index 5584dfd..e318e11 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
@@ -144,15 +144,6 @@
     return fields;
   }
 
-  /** In flush we reset the fieldHash to not maintain per-field state
-   *  across segments */
-  @Override
-  void doAfterFlush() {
-    fieldHash = new DocFieldProcessorPerField[2];
-    hashMask = 1;
-    totalFieldCount = 0;
-  }
-
   private void rehash() {
     final int newHashSize = (fieldHash.length*2);
     assert newHashSize > fieldHash.length;
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerField.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerField.java
index 367156e..32fad15 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerField.java
@@ -17,12 +17,7 @@
  * limitations under the License.
  */
 
-import java.util.HashMap;
-import java.util.Map;
-
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.RamUsageEstimator;
 
 /**
@@ -33,19 +28,16 @@
 
   final DocFieldConsumerPerField consumer;
   final FieldInfo fieldInfo;
-  private final Counter bytesUsed;
 
   DocFieldProcessorPerField next;
   int lastGen = -1;
 
   int fieldCount;
   IndexableField[] fields = new IndexableField[1];
-  private final Map<FieldInfo,String> dvFields = new HashMap<FieldInfo,String>();
 
   public DocFieldProcessorPerField(final DocFieldProcessor docFieldProcessor, final FieldInfo fieldInfo) {
     this.consumer = docFieldProcessor.consumer.addField(fieldInfo);
     this.fieldInfo = fieldInfo;
-    this.bytesUsed = docFieldProcessor.bytesUsed;
   }
 
   public void addField(IndexableField field) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index 3172f86..c51be89 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -29,15 +29,11 @@
 import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
 import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
 import org.apache.lucene.index.FieldInfos.FieldNumbers;
-import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FlushInfo;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.InfoStream;
-import org.apache.lucene.util.MutableBits;
 
 /**
  * This class accepts multiple added documents and directly
@@ -114,6 +110,7 @@
   List<String> newFiles;
 
   final IndexWriter indexWriter;
+  final LiveIndexWriterConfig indexWriterConfig;
 
   private AtomicInteger numDocsInRAM = new AtomicInteger(0);
 
@@ -144,6 +141,7 @@
     this.indexWriter = writer;
     this.infoStream = config.getInfoStream();
     this.similarity = config.getSimilarity();
+    this.indexWriterConfig = writer.getConfig();
     this.perThreadPool = config.getIndexerThreadPool();
     this.chain = config.getIndexingChain();
     this.perThreadPool.initialize(this, globalFieldNumbers, config);
@@ -517,7 +515,7 @@
     // buffer, force them all to apply now. This is to
     // prevent too-frequent flushing of a long tail of
     // tiny segments:
-    final double ramBufferSizeMB = indexWriter.getConfig().getRAMBufferSizeMB();
+    final double ramBufferSizeMB = indexWriterConfig.getRAMBufferSizeMB();
     if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
         flushControl.getDeleteBytesUsed() > (1024*1024*ramBufferSizeMB/2)) {
       if (infoStream.isEnabled("DW")) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index 3494322..cdc5088 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -112,7 +112,7 @@
 
     // Only called by asserts
     public boolean testPoint(String name) {
-      return docWriter.writer.testPoint(name);
+      return docWriter.testPoint(name);
     }
 
     public void clear() {
@@ -194,6 +194,7 @@
   private final NumberFormat nf = NumberFormat.getInstance(Locale.ROOT);
   final Allocator byteBlockAllocator;
   final IntBlockPool.Allocator intBlockAllocator;
+  private final LiveIndexWriterConfig indexWriterConfig;
 
   
   public DocumentsWriterPerThread(Directory directory, DocumentsWriter parent,
@@ -203,6 +204,7 @@
     this.parent = parent;
     this.fieldInfos = fieldInfos;
     this.writer = parent.indexWriter;
+    this.indexWriterConfig = parent.indexWriterConfig;
     this.infoStream = parent.infoStream;
     this.codec = parent.codec;
     this.docState = new DocState(this, infoStream);
@@ -232,6 +234,13 @@
     aborting = true;
   }
   
+  final boolean testPoint(String message) {
+    if (infoStream.isEnabled("TP")) {
+      infoStream.message("TP", message);
+    }
+    return true;
+  }
+  
   boolean checkAndResetHasAborted() {
     final boolean retval = hasAborted;
     hasAborted = false;
@@ -239,7 +248,7 @@
   }
 
   public void updateDocument(IndexDocument doc, Analyzer analyzer, Term delTerm) throws IOException {
-    assert writer.testPoint("DocumentsWriterPerThread addDocument start");
+    assert testPoint("DocumentsWriterPerThread addDocument start");
     assert deleteQueue != null;
     docState.doc = doc;
     docState.analyzer = analyzer;
@@ -292,7 +301,7 @@
   }
   
   public int updateDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer, Term delTerm) throws IOException {
-    assert writer.testPoint("DocumentsWriterPerThread addDocuments start");
+    assert testPoint("DocumentsWriterPerThread addDocuments start");
     assert deleteQueue != null;
     docState.analyzer = analyzer;
     if (segmentInfo == null) {
@@ -428,7 +437,6 @@
   /** Reset after a flush */
   private void doAfterFlush() {
     segmentInfo = null;
-    consumer.doAfterFlush();
     directory.getCreatedFiles().clear();
     fieldInfos = new FieldInfos.Builder(fieldInfos.globalFieldNumbers);
     parent.subtractFlushedNumDocs(numDocsInRAM);
@@ -561,7 +569,7 @@
 
     boolean success = false;
     try {
-      if (writer.useCompoundFile(newSegment)) {
+      if (indexWriterConfig.getUseCompoundFile()) {
 
         // Now build compound file
         Collection<String> oldFiles = IndexWriter.createCompoundFile(infoStream, directory, MergeState.CheckAbort.NONE, newSegment.info, context);
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexDeletionPolicy.java b/lucene/core/src/java/org/apache/lucene/index/IndexDeletionPolicy.java
index 1e8ca35..ca04f30 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexDeletionPolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexDeletionPolicy.java
@@ -20,6 +20,8 @@
 import java.util.List;
 import java.io.IOException;
 
+import org.apache.lucene.store.Directory;
+
 /**
  * <p>Expert: policy for deletion of stale {@link IndexCommit index commits}. 
  * 
@@ -46,6 +48,10 @@
  * target="top"
  * href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
  * for details.</p>
+ *
+ * <p>Implementers of sub-classes should make sure that {@link #clone()}
+ * returns an independent instance able to work with any other {@link IndexWriter}
+ * or {@link Directory} instance.</p>
  */
 
 public abstract class IndexDeletionPolicy implements Cloneable {
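
A minimal sketch of a policy that satisfies the clone contract spelled out above; the class is hypothetical and keeps no per-writer state, so the inherited clone() already produces an independent instance:

    import java.util.List;

    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.IndexDeletionPolicy;

    // Hypothetical: keeps every commit, hence nothing writer-specific to clone.
    public class KeepAllDeletionPolicy extends IndexDeletionPolicy {
      @Override
      public void onInit(List<? extends IndexCommit> commits) {
        // never delete anything
      }

      @Override
      public void onCommit(List<? extends IndexCommit> commits) {
        // never delete anything
      }
    }

A policy that does cache state (e.g. the last commit it saw) would need to override clone() and reset that state in the copy.
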
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
index c6ec5d0..9ff0005 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
@@ -20,6 +20,7 @@
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -170,7 +171,7 @@
             SegmentInfos sis = new SegmentInfos();
             try {
               sis.read(directory, fileName);
-            } catch (FileNotFoundException e) {
+            } catch (FileNotFoundException | NoSuchFileException e) {
               // LUCENE-948: on NFS (and maybe others), if
               // you have writers switching back and forth
               // between machines, it's very likely that the
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index c346ca8..d891562 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -214,7 +214,7 @@
   private final Analyzer analyzer;    // how to analyze text
 
   private volatile long changeCount; // increments every time a change is completed
-  private long lastCommitChangeCount; // last changeCount that was committed
+  private volatile long lastCommitChangeCount; // last changeCount that was committed
 
   private List<SegmentInfoPerCommit> rollbackSegments;      // list of segmentInfo we will fallback to if the commit fails
 
@@ -631,7 +631,12 @@
   /**
    * Constructs a new IndexWriter per the settings given in <code>conf</code>.
    * Note that the passed in {@link IndexWriterConfig} is
-   * privately cloned; if you need to make subsequent "live"
+   * privately cloned, which, in turn, clones the
+   * {@link IndexWriterConfig#getFlushPolicy() flush policy},
+   * {@link IndexWriterConfig#getIndexDeletionPolicy() deletion policy},
+   * {@link IndexWriterConfig#getMergePolicy() merge policy},
+   * and {@link IndexWriterConfig#getMergeScheduler() merge scheduler}.
+   * If you need to make subsequent "live"
    * changes to the configuration use {@link #getConfig}.
    * <p>
    * 
@@ -2269,10 +2274,6 @@
     }
   }
 
-  synchronized boolean useCompoundFile(SegmentInfoPerCommit segmentInfo) throws IOException {
-    return mergePolicy.useCompoundFile(segmentInfos, segmentInfo);
-  }
-
   private synchronized void resetMergeExceptions() {
     mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
     mergeGen++;
@@ -2826,6 +2827,11 @@
     commitInternal();
   }
 
+  /** Returns true if there are changes that have not been committed */
+  public final boolean hasUncommittedChanges() {
+    return changeCount != lastCommitChangeCount;
+  }
+
   private final void commitInternal() throws IOException {
 
     if (infoStream.isEnabled("IW")) {
@@ -2865,8 +2871,8 @@
         if (infoStream.isEnabled("IW")) {
           infoStream.message("IW", "commit: wrote segments file \"" + pendingCommit.getSegmentsFileName() + "\"");
         }
-        lastCommitChangeCount = pendingCommitChangeCount;
         segmentInfos.updateGeneration(pendingCommit);
+        lastCommitChangeCount = pendingCommitChangeCount;
         rollbackSegments = pendingCommit.createBackupSegmentInfos();
         deleter.checkpoint(pendingCommit, true);
       } finally {
@@ -4162,7 +4168,10 @@
   //   startCommitMergeDeletes
   //   startMergeInit
   //   DocumentsWriter.ThreadState.init start
-  boolean testPoint(String name) {
+  private final boolean testPoint(String message) {
+    if (infoStream.isEnabled("TP")) {
+      infoStream.message("TP", message);
+    }
     return true;
   }
 
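
hasUncommittedChanges() makes the changeCount bookkeeping above directly usable by applications. A sketch of the obvious idiom, assuming an already-open writer:

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;

    public class CommitIfDirty {
      // Skip the (relatively expensive) commit when nothing changed since the last one.
      static void maybeCommit(IndexWriter writer) throws IOException {
        if (writer.hasUncommittedChanges()) {
          writer.commit();
        }
      }
    }

Making lastCommitChangeCount volatile (also in this hunk) is what allows this check from any thread without holding the writer's lock.
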
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
index de8d958..43de381 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -110,6 +110,10 @@
    *  others to finish. Default value is 8. */
   public final static int DEFAULT_MAX_THREAD_STATES = 8;
   
+  /** Default value for compound file system for newly written segments
+   *  (set to <code>true</code>). For batch indexing with very large
+   *  RAM buffers use <code>false</code>. */
+  public final static boolean DEFAULT_USE_COMPOUND_FILE_SYSTEM = true;
   /**
    * Sets the default (for any instance) maximum time to wait for a write lock
    * (in milliseconds).
@@ -540,5 +544,9 @@
   public IndexWriterConfig setTermIndexInterval(int interval) {
     return (IndexWriterConfig) super.setTermIndexInterval(interval);
   }
+  
+  @Override
+  public IndexWriterConfig setUseCompoundFile(boolean useCompoundFile) {
+    return (IndexWriterConfig) super.setUseCompoundFile(useCompoundFile);
+  }
 
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
index 448715a..8bb344e 100755
--- a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
@@ -98,6 +98,9 @@
   /** {@link Version} that {@link IndexWriter} should emulate. */
   protected final Version matchVersion;
 
+  /** True if segment flushes should use compound file format */
+  protected volatile boolean useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
+
   // used by IndexWriterConfig
   LiveIndexWriterConfig(Analyzer analyzer, Version matchVersion) {
     this.analyzer = analyzer;
@@ -110,6 +113,7 @@
     termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
     delPolicy = new KeepOnlyLastCommitDeletionPolicy();
     commit = null;
+    useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
     openMode = OpenMode.CREATE_OR_APPEND;
     similarity = IndexSearcher.getDefaultSimilarity();
     mergeScheduler = new ConcurrentMergeScheduler();
@@ -154,6 +158,7 @@
     readerPooling = config.getReaderPooling();
     flushPolicy = config.getFlushPolicy();
     perThreadHardLimitMB = config.getRAMPerThreadHardLimitMB();
+    useCompoundFile = config.getUseCompoundFile();
   }
 
   /** Returns the default analyzer to use for indexing documents. */
@@ -542,6 +547,33 @@
     return infoStream;
   }
   
+  /**
+   * Sets whether the {@link IndexWriter} should pack newly written segments in a
+   * compound file. Default is <code>true</code>.
+   * <p>
+   * Use <code>false</code> for batch indexing with very large RAM buffer
+   * settings.
+   * </p>
+   * <p>
+   * <b>Note: To control compound file usage during segment merges see
+   * {@link MergePolicy#setNoCFSRatio(double)} and
+   * {@link MergePolicy#setMaxCFSSegmentSizeMB(double)}. This setting only
+   * applies to newly created segments.</b>
+   * </p>
+   */
+  public LiveIndexWriterConfig setUseCompoundFile(boolean useCompoundFile) {
+    this.useCompoundFile = useCompoundFile;
+    return this;
+  }
+  
+  /**
+   * Returns <code>true</code> iff the {@link IndexWriter} packs
+   * newly written segments in a compound file. Default is <code>true</code>.
+   */
+  public boolean getUseCompoundFile() {
+    return useCompoundFile;
+  }
+  
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
@@ -567,7 +599,10 @@
     sb.append("indexerThreadPool=").append(getIndexerThreadPool()).append("\n");
     sb.append("readerPooling=").append(getReaderPooling()).append("\n");
     sb.append("perThreadHardLimitMB=").append(getRAMPerThreadHardLimitMB()).append("\n");
+    sb.append("useCompoundFile=").append(getUseCompoundFile()).append("\n");
     return sb.toString();
   }
 
 }
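
Because useCompoundFile lives on LiveIndexWriterConfig and the field is volatile, it can be flipped while the writer is open. A sketch, with the Version constant, analyzer, and directory as placeholders:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class CompoundFileToggleSketch {
      public static void main(String[] args) throws Exception {
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_42,
            new StandardAnalyzer(Version.LUCENE_42));
        conf.setUseCompoundFile(false);  // batch load: flush plain segments
        IndexWriter writer = new IndexWriter(new RAMDirectory(), conf);
        // ... bulk indexing ...
        writer.getConfig().setUseCompoundFile(true);  // later flushes pack into CFS
        writer.close();
      }
    }

As the javadoc above notes, this setting only governs newly flushed segments; merged segments follow the MergePolicy's noCFSRatio and maxCFSSegmentSizeMB.
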
diff --git a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
index b35f2aa..7518642 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -64,16 +64,9 @@
 
   /** Default noCFSRatio.  If a merge's size is >= 10% of
    *  the index, then we disable compound file for it.
-   *  @see #setNoCFSRatio */
+   *  @see MergePolicy#setNoCFSRatio */
   public static final double DEFAULT_NO_CFS_RATIO = 0.1;
 
-  /** Default maxCFSSegmentSize value allows compound file
-   * for a segment of any size. The actual file format is
-   * still subject to noCFSRatio.
-   * @see #setMaxCFSSegmentSizeMB(double)
-   */
-  public static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;
-
   /** How many segments to merge at a time. */
   protected int mergeFactor = DEFAULT_MERGE_FACTOR;
 
@@ -96,30 +89,14 @@
    *  will never be merged. */
   protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
 
-  /** If the size of the merge segment exceeds this ratio of
-   *  the total index size then it will remain in
-   *  non-compound format even if {@link
-   *  #setUseCompoundFile} is {@code true}. */
-  protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
-
-  /** If the size of the merged segment exceeds
-   *  this value then it will not use compound file format. */
-  protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
-
   /** If true, we pro-rate a segment's size by the
    *  percentage of non-deleted documents. */
   protected boolean calibrateSizeByDeletes = true;
 
-  /** True if new segments (flushed or merged) should use
-   *  the compound file format.  Note that large segments
-   *  may sometimes still use non-compound format (see
-   *  {@link #setNoCFSRatio}. */
-  protected boolean useCompoundFile = true;
-
   /** Sole constructor. (For invocation by subclass 
    *  constructors, typically implicit.) */
   public LogMergePolicy() {
-    super();
+    super(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE);
   }
 
   /** Returns true if {@code LMP} is enabled in {@link
@@ -129,25 +106,6 @@
     return w != null && w.infoStream.isEnabled("LMP");
   }
 
-  /** Returns current {@code noCFSRatio}.
-   *
-   *  @see #setNoCFSRatio */
-  public double getNoCFSRatio() {
-    return noCFSRatio;
-  }
-
-  /** If a merged segment will be more than this percentage
-   *  of the total size of the index, leave the segment as
-   *  non-compound file even if compound file is enabled.
-   *  Set to 1.0 to always use CFS regardless of merge
-   *  size. */
-  public void setNoCFSRatio(double noCFSRatio) {
-    if (noCFSRatio < 0.0 || noCFSRatio > 1.0) {
-      throw new IllegalArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
-    }
-    this.noCFSRatio = noCFSRatio;
-  }
-
   /** Print a debug message to {@link IndexWriter}'s {@code
    *  infoStream}. */
   protected void message(String message) {
@@ -178,39 +136,6 @@
     this.mergeFactor = mergeFactor;
   }
 
-  // Javadoc inherited
-  @Override
-  public boolean useCompoundFile(SegmentInfos infos, SegmentInfoPerCommit mergedInfo) throws IOException {
-    if (!getUseCompoundFile()) {
-      return false;
-    }
-    long mergedInfoSize = size(mergedInfo);
-    if (mergedInfoSize > maxCFSSegmentSize) {
-      return false;
-    }
-    if (getNoCFSRatio() >= 1.0) {
-      return true;
-    }
-    long totalSize = 0;
-    for (SegmentInfoPerCommit info : infos) {
-      totalSize += size(info);
-    }
-    return mergedInfoSize <= getNoCFSRatio() * totalSize;
-  }
-
-  /** Sets whether compound file format should be used for
-   *  newly flushed and newly merged segments. */
-  public void setUseCompoundFile(boolean useCompoundFile) {
-    this.useCompoundFile = useCompoundFile;
-  }
-
-  /** Returns true if newly flushed and newly merge segments
-   *  are written in compound file format. @see
-   *  #setUseCompoundFile */
-  public boolean getUseCompoundFile() {
-    return useCompoundFile;
-  }
-
   /** Sets whether the segment size should be calibrated by
    *  the number of deletes when choosing segments for merge. */
   public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes) {
@@ -226,9 +151,6 @@
   @Override
   public void close() {}
 
-  /** Return the size of the provided {@link
-   *  SegmentInfoPerCommit}. */
-  abstract protected long size(SegmentInfoPerCommit info) throws IOException;
 
   /** Return the number of documents in the provided {@link
    *  SegmentInfoPerCommit}, pro-rated by percentage of
@@ -249,15 +171,10 @@
    *  non-deleted documents if {@link
    *  #setCalibrateSizeByDeletes} is set. */
   protected long sizeBytes(SegmentInfoPerCommit info) throws IOException {
-    long byteSize = info.sizeInBytes();
     if (calibrateSizeByDeletes) {
-      int delCount = writer.get().numDeletedDocs(info);
-      double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((float)delCount / (float)info.info.getDocCount()));
-      assert delRatio <= 1.0;
-      return (info.info.getDocCount() <= 0 ?  byteSize : (long)(byteSize * (1.0 - delRatio)));
-    } else {
-      return byteSize;
+      return super.size(info);
     }
+    return info.sizeInBytes();
   }
   
   /** Returns true if the number of segments eligible for
@@ -282,19 +199,6 @@
       (numToMerge != 1 || !segmentIsOriginal || isMerged(mergeInfo));
   }
 
-  /** Returns true if this single info is already fully merged (has no
-   *  pending norms or deletes, is in the same dir as the
-   *  writer, and matches the current compound file setting */
-  protected boolean isMerged(SegmentInfoPerCommit info)
-    throws IOException {
-    IndexWriter w = writer.get();
-    assert w != null;
-    boolean hasDeletions = w.numDeletedDocs(info) > 0;
-    return !hasDeletions &&
-      info.info.dir == w.getDirectory() &&
-      (info.info.getUseCompoundFile() == useCompoundFile || noCFSRatio < 1.0);
-  }
-
   /**
    * Returns the merges necessary to merge the index, taking the max merge
    * size or max merge docs into consideration. This method attempts to respect
@@ -726,29 +630,10 @@
     sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", ");
     sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", ");
     sb.append("maxMergeDocs=").append(maxMergeDocs).append(", ");
-    sb.append("useCompoundFile=").append(useCompoundFile).append(", ");
     sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", ");
     sb.append("noCFSRatio=").append(noCFSRatio);
     sb.append("]");
     return sb.toString();
   }
 
-  /** Returns the largest size allowed for a compound file segment */
-  public final double getMaxCFSSegmentSizeMB() {
-    return maxCFSSegmentSize/1024/1024.;
-  }
-
-  /** If a merged segment will be more than this value,
-   *  leave the segment as
-   *  non-compound file even if compound file is enabled.
-   *  Set this to Double.POSITIVE_INFINITY (default) and noCFSRatio to 1.0
-   *  to always use CFS regardless of merge size. */
-  public final void setMaxCFSSegmentSizeMB(double v) {
-    if (v < 0.0) {
-      throw new IllegalArgumentException("maxCFSSegmentSizeMB must be >=0 (got " + v + ")");
-    }
-    v *= 1024 * 1024;
-    this.maxCFSSegmentSize = (v > Long.MAX_VALUE) ? Long.MAX_VALUE : (long) v;
-  }
-
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
index c17f1ab..3352c1f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
@@ -57,7 +57,6 @@
  *
  * @lucene.experimental
  */
-
 public abstract class MergePolicy implements java.io.Closeable, Cloneable {
 
   /** A map of doc IDs. */
@@ -361,9 +360,29 @@
       super(message);
     }
   }
+  
+  /**
+   * Default ratio for compound file system usage. Set to <tt>1.0</tt>: always use
+   * the compound file system.
+   */
+  protected static final double DEFAULT_NO_CFS_RATIO = 1.0;
+
+  /**
+   * Default max segment size in order to use compound file system. Set to {@link Long#MAX_VALUE}.
+   */
+  protected static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;
 
   /** {@link IndexWriter} that contains this instance. */
   protected SetOnce<IndexWriter> writer;
+  
+  /** If the size of the merged segment exceeds this ratio of
+   *  the total index size then it will remain in
+   *  non-compound format */
+  protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
+  
+  /** If the size of the merged segment exceeds
+   *  this value then it will not use compound file format. */
+  protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
 
   @Override
   public MergePolicy clone() {
@@ -384,7 +403,18 @@
    * {@link #setIndexWriter(IndexWriter)}.
    */
   public MergePolicy() {
+    this(DEFAULT_NO_CFS_RATIO, DEFAULT_MAX_CFS_SEGMENT_SIZE);
+  }
+  
+  /**
+   * Creates a new merge policy instance with the given defaults for noCFSRatio
+   * and maxCFSSegmentSize. This ctor should be used by subclasses using different
+   * defaults than {@link MergePolicy}.
+   */
+  protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize) {
     writer = new SetOnce<IndexWriter>();
+    this.noCFSRatio = defaultNoCFSRatio;
+    this.maxCFSSegmentSize = defaultMaxCFSSegmentSize;
   }
 
   /**
@@ -451,12 +481,91 @@
   @Override
   public abstract void close();
   
-  
   /**
-   * Returns true if a new segment (regardless of its origin) should use the compound file format.
+   * Returns true if a new segment (regardless of its origin) should use the
+   * compound file format. The default implementation returns <code>true</code>
+   * iff the size of the given mergedInfo is less than or equal to
+   * {@link #getMaxCFSSegmentSizeMB()} and less than or equal to
+   * TotalIndexSize * {@link #getNoCFSRatio()}, otherwise <code>false</code>.
    */
-  public abstract boolean useCompoundFile(SegmentInfos segments, SegmentInfoPerCommit newSegment) throws IOException;
+  public boolean useCompoundFile(SegmentInfos infos, SegmentInfoPerCommit mergedInfo) throws IOException {
+    if (getNoCFSRatio() == 0.0) {
+      return false;
+    }
+    long mergedInfoSize = size(mergedInfo);
+    if (mergedInfoSize > maxCFSSegmentSize) {
+      return false;
+    }
+    if (getNoCFSRatio() >= 1.0) {
+      return true;
+    }
+    long totalSize = 0;
+    for (SegmentInfoPerCommit info : infos) {
+      totalSize += size(info);
+    }
+    return mergedInfoSize <= getNoCFSRatio() * totalSize;
+  }
   
+  /** Return the byte size of the provided {@link
+   *  SegmentInfoPerCommit}, pro-rated by the percentage of
+   *  non-deleted documents. */
+  protected long size(SegmentInfoPerCommit info) throws IOException {
+    long byteSize = info.sizeInBytes();
+    int delCount = writer.get().numDeletedDocs(info);
+    double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((float)delCount / (float)info.info.getDocCount()));
+    assert delRatio <= 1.0;
+    return (info.info.getDocCount() <= 0 ?  byteSize : (long)(byteSize * (1.0 - delRatio)));
+  }
+  
+  /** Returns true if this single info is already fully merged (has no
+   *  pending deletes, is in the same dir as the
+   *  writer, and matches the current compound file setting) */
+  protected final boolean isMerged(SegmentInfoPerCommit info) {
+    IndexWriter w = writer.get();
+    assert w != null;
+    boolean hasDeletions = w.numDeletedDocs(info) > 0;
+    return !hasDeletions &&
+      info.info.dir == w.getDirectory() &&
+      ((noCFSRatio > 0.0 && noCFSRatio < 1.0) || maxCFSSegmentSize < Long.MAX_VALUE);
+  }
+  
+  /** Returns current {@code noCFSRatio}.
+   *
+   *  @see #setNoCFSRatio */
+  public final double getNoCFSRatio() {
+    return noCFSRatio;
+  }
+
+  /** If a merged segment will be more than this percentage
+   *  of the total size of the index, leave the segment as
+   *  non-compound file even if compound file is enabled.
+   *  Set to 1.0 to always use CFS regardless of merge
+   *  size. */
+  public final void setNoCFSRatio(double noCFSRatio) {
+    if (noCFSRatio < 0.0 || noCFSRatio > 1.0) {
+      throw new IllegalArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
+    }
+    this.noCFSRatio = noCFSRatio;
+  }
+
+  /** Returns the largest size allowed for a compound file segment */
+  public final double getMaxCFSSegmentSizeMB() {
+    return maxCFSSegmentSize/1024/1024.;
+  }
+
+  /** If a merged segment will be more than this value,
+   *  leave the segment as
+   *  non-compound file even if compound file is enabled.
+   *  Set this to Double.POSITIVE_INFINITY (default) and noCFSRatio to 1.0
+   *  to always use CFS regardless of merge size. */
+  public final void setMaxCFSSegmentSizeMB(double v) {
+    if (v < 0.0) {
+      throw new IllegalArgumentException("maxCFSSegmentSizeMB must be >=0 (got " + v + ")");
+    }
+    v *= 1024 * 1024;
+    this.maxCFSSegmentSize = (v > Long.MAX_VALUE) ? Long.MAX_VALUE : (long) v;
+  }
+
   /**
    * MergeTrigger is passed to
    * {@link MergePolicy#findMerges(MergeTrigger, SegmentInfos)} to indicate the
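
With useCompoundFile() now implemented once in MergePolicy, per-policy CFS tuning reduces to the two final setters. A sketch using TieredMergePolicy (any concrete policy would work the same way):

    import org.apache.lucene.index.TieredMergePolicy;

    public class CfsTuningSketch {
      public static void main(String[] args) {
        TieredMergePolicy mp = new TieredMergePolicy();
        mp.setNoCFSRatio(1.0);             // ratio check always passes ...
        mp.setMaxCFSSegmentSizeMB(512.0);  // ... but merged segments over 512 MB stay non-compound
        System.out.println(mp);
      }
    }
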
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java b/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java
index 8cd0b8d..385f4b4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java
@@ -24,7 +24,9 @@
  *  implementing this interface to execute the merges
  *  selected by a {@link MergePolicy}.  The default
  *  MergeScheduler is {@link ConcurrentMergeScheduler}.</p>
- *
+ *  <p>Implementers of sub-classes should make sure that {@link #clone()}
+ *  returns an independent instance able to work with any {@link IndexWriter}
+ *  instance.</p>
  * @lucene.experimental
 */
 public abstract class MergeScheduler implements Closeable, Cloneable {
diff --git a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
index bf147bc..697ce71 100644
--- a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
@@ -20,8 +20,6 @@
 import java.io.IOException;
 import java.util.Map;
 
-import org.apache.lucene.index.MergePolicy.MergeTrigger;
-import org.apache.lucene.index.MergePolicy.MergeSpecification;
 
 /**
  * A {@link MergePolicy} which never returns merges to execute (hence it's
@@ -49,6 +47,7 @@
   private final boolean useCompoundFile;
   
   private NoMergePolicy(boolean useCompoundFile) {
+    super(useCompoundFile ? 1.0 : 0.0, 0);
     // prevent instantiation
     this.useCompoundFile = useCompoundFile;
   }
@@ -71,6 +70,11 @@
 
   @Override
   public void setIndexWriter(IndexWriter writer) {}
+  
+  @Override
+  protected long size(SegmentInfoPerCommit info) throws IOException {
+    return Long.MAX_VALUE;
+  }
 
   @Override
   public String toString() {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
index 7851e79..cdba96b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -613,10 +612,6 @@
           IndexInput genInput = null;
           try {
             genInput = directory.openInput(IndexFileNames.SEGMENTS_GEN, IOContext.READONCE);
-          } catch (FileNotFoundException e) {
-            if (infoStream != null) {
-              message("segments.gen open: FileNotFoundException " + e);
-            }
           } catch (IOException e) {
             if (infoStream != null) {
               message("segments.gen open: IOException " + e);
diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredFieldsProcessor.java b/lucene/core/src/java/org/apache/lucene/index/StoredFieldsProcessor.java
index eea05b1..932e4fe 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StoredFieldsProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StoredFieldsProcessor.java
@@ -18,11 +18,13 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.RamUsageEstimator;
 
 /** This is a StoredFieldsConsumer that writes stored fields. */
@@ -32,8 +34,6 @@
   final DocumentsWriterPerThread docWriter;
   int lastDocID;
 
-  int freeCount;
-
   final DocumentsWriterPerThread.DocState docState;
   final Codec codec;
 
@@ -44,13 +44,13 @@
   }
 
   private int numStoredFields;
-  private StorableField[] storedFields;
-  private FieldInfo[] fieldInfos;
+  private StorableField[] storedFields = new StorableField[1];
+  private FieldInfo[] fieldInfos = new FieldInfo[1];
 
   public void reset() {
     numStoredFields = 0;
-    storedFields = new StorableField[1];
-    fieldInfos = new FieldInfo[1];
+    Arrays.fill(storedFields, null);
+    Arrays.fill(fieldInfos, null);
   }
   
   @Override
@@ -61,7 +61,6 @@
   @Override
   public void flush(SegmentWriteState state) throws IOException {
     int numDocs = state.segmentInfo.getDocCount();
-
     if (numDocs > 0) {
       // It's possible that all documents seen in this segment
       // hit non-aborting exceptions, in which case we will
@@ -69,14 +68,17 @@
       initFieldsWriter(state.context);
       fill(numDocs);
     }
-
     if (fieldsWriter != null) {
-      try {
-        fieldsWriter.finish(state.fieldInfos, numDocs);
-      } finally {
-        fieldsWriter.close();
-        fieldsWriter = null;
-        lastDocID = 0;
+      boolean success = false;
+      try {
+        fieldsWriter.finish(state.fieldInfos, numDocs);
+        success = true;
+      } finally {
+        if (success) {
+          IOUtils.close(fieldsWriter);
+        } else {
+          IOUtils.closeWhileHandlingException(fieldsWriter);
+        }
       }
     }
   }
@@ -88,7 +90,6 @@
     }
   }
 
-  int allocCount;
 
   @Override
   void abort() {
@@ -114,7 +115,7 @@
 
   @Override
   void finishDocument() throws IOException {
-    assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument start");
+    assert docWriter.testPoint("StoredFieldsWriter.finishDocument start");
 
     initFieldsWriter(IOContext.DEFAULT);
     fill(docState.docID);
@@ -129,7 +130,7 @@
     }
 
     reset();
-    assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end");
+    assert docWriter.testPoint("StoredFieldsWriter.finishDocument end");
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
index bafba22..f548eea 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Map;
 
 import org.apache.lucene.codecs.TermVectorsWriter;
@@ -32,9 +33,6 @@
 
   TermVectorsWriter writer;
   final DocumentsWriterPerThread docWriter;
-  int freeCount;
-  int lastDocID;
-
   final DocumentsWriterPerThread.DocState docState;
   final BytesRef flushTerm = new BytesRef();
 
@@ -42,6 +40,9 @@
   final ByteSliceReader vectorSliceReaderPos = new ByteSliceReader();
   final ByteSliceReader vectorSliceReaderOff = new ByteSliceReader();
   boolean hasVectors;
+  int numVectorFields;
+  int lastDocID;
+  private TermVectorsConsumerPerField[] perFields = new TermVectorsConsumerPerField[1];
 
   public TermVectorsConsumer(DocumentsWriterPerThread docWriter) {
     this.docWriter = docWriter;
@@ -52,6 +53,7 @@
   void flush(Map<String, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
     if (writer != null) {
       int numDocs = state.segmentInfo.getDocCount();
+      assert numDocs > 0;
       // At least one doc in this run had term vectors enabled
       try {
         fill(numDocs);
@@ -60,7 +62,6 @@
       } finally {
         IOUtils.close(writer);
         writer = null;
-
         lastDocID = 0;
         hasVectors = false;
       }
@@ -94,7 +95,7 @@
   @Override
   void finishDocument(TermsHash termsHash) throws IOException {
 
-    assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument start");
+    assert docWriter.testPoint("TermVectorsTermsWriter.finishDocument start");
 
     if (!hasVectors) {
       return;
@@ -117,7 +118,7 @@
 
     termsHash.reset();
     reset();
-    assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument end");
+    assert docWriter.testPoint("TermVectorsTermsWriter.finishDocument end");
   }
 
   @Override
@@ -130,17 +131,12 @@
     }
 
     lastDocID = 0;
-
     reset();
   }
 
-  int numVectorFields;
-
-  TermVectorsConsumerPerField[] perFields;
-
   void reset() {
+      Arrays.fill(perFields, null); // don't hang onto stuff from previous doc
     numVectorFields = 0;
-    perFields = new TermVectorsConsumerPerField[1];
   }
 
   @Override
@@ -175,10 +171,7 @@
   String lastVectorFieldName;
   final boolean vectorFieldsInOrder(FieldInfo fi) {
     try {
-      if (lastVectorFieldName != null)
-        return lastVectorFieldName.compareTo(fi.name) < 0;
-      else
-        return true;
+      return lastVectorFieldName == null || lastVectorFieldName.compareTo(fi.name) < 0;
     } finally {
       lastVectorFieldName = fi.name;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
index 745681d..e6a485b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java
@@ -27,9 +27,6 @@
 import java.util.List;
 import java.util.ArrayList;
 
-import org.apache.lucene.index.MergePolicy.MergeTrigger;
-
-
 /**
  *  Merges segments of approximately equal size, subject to
  *  an allowed number of segments per tier.  This is similar
@@ -76,7 +73,11 @@
 //     maybe CMS should do so)
 
 public class TieredMergePolicy extends MergePolicy {
-
+  /** Default noCFSRatio.  If a merge's size is >= 10% of
+   *  the index, then we disable compound file for it.
+   *  @see MergePolicy#setNoCFSRatio */
+  public static final double DEFAULT_NO_CFS_RATIO = 0.1;
+  
   private int maxMergeAtOnce = 10;
   private long maxMergedSegmentBytes = 5*1024*1024*1024L;
   private int maxMergeAtOnceExplicit = 30;
@@ -84,14 +85,12 @@
   private long floorSegmentBytes = 2*1024*1024L;
   private double segsPerTier = 10.0;
   private double forceMergeDeletesPctAllowed = 10.0;
-  private boolean useCompoundFile = true;
-  private double noCFSRatio = 0.1;
-  private long maxCFSSegmentSize = Long.MAX_VALUE;
   private double reclaimDeletesWeight = 2.0;
 
   /** Sole constructor, setting all settings to their
    *  defaults. */
   public TieredMergePolicy() {
+    super(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE);
   }
 
   /** Maximum number of segments to be merged at a time
@@ -233,41 +232,6 @@
     return segsPerTier;
   }
 
-  /** Sets whether compound file format should be used for
-   *  newly flushed and newly merged segments.  Default
-   *  true. */
-  public TieredMergePolicy setUseCompoundFile(boolean useCompoundFile) {
-    this.useCompoundFile = useCompoundFile;
-    return this;
-  }
-
-  /** Returns the current useCompoundFile setting.
-   *
-   * @see  #setUseCompoundFile */
-  public boolean getUseCompoundFile() {
-    return useCompoundFile;
-  }
-
-  /** If a merged segment will be more than this percentage
-   *  of the total size of the index, leave the segment as
-   *  non-compound file even if compound file is enabled.
-   *  Set to 1.0 to always use CFS regardless of merge
-   *  size.  Default is 0.1. */
-  public TieredMergePolicy setNoCFSRatio(double noCFSRatio) {
-    if (noCFSRatio < 0.0 || noCFSRatio > 1.0) {
-      throw new IllegalArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
-    }
-    this.noCFSRatio = noCFSRatio;
-    return this;
-  }
-  
-  /** Returns the current noCFSRatio setting.
-   *
-   * @see #setNoCFSRatio */
-  public double getNoCFSRatio() {
-    return noCFSRatio;
-  }
-
   private class SegmentByteSizeDescending implements Comparator<SegmentInfoPerCommit> {
     @Override
     public int compare(SegmentInfoPerCommit o1, SegmentInfoPerCommit o2) {
@@ -637,46 +601,9 @@
   }
 
   @Override
-  public boolean useCompoundFile(SegmentInfos infos, SegmentInfoPerCommit mergedInfo) throws IOException {
-    if (!getUseCompoundFile()) {
-        return false;
-    }
-    long mergedInfoSize = size(mergedInfo);
-    if (mergedInfoSize > maxCFSSegmentSize) {
-        return false;
-    }
-    if (getNoCFSRatio() >= 1.0) {
-        return true;
-    }
-    long totalSize = 0;
-    for (SegmentInfoPerCommit info : infos) {
-        totalSize += size(info);
-    }
-    return mergedInfoSize <= getNoCFSRatio() * totalSize;
-  }
-
-  @Override
   public void close() {
   }
 
-  private boolean isMerged(SegmentInfoPerCommit info) {
-    IndexWriter w = writer.get();
-    assert w != null;
-    boolean hasDeletions = w.numDeletedDocs(info) > 0;
-    return !hasDeletions &&
-      info.info.dir == w.getDirectory() &&
-      (info.info.getUseCompoundFile() == useCompoundFile || noCFSRatio < 1.0 || maxCFSSegmentSize < Long.MAX_VALUE);
-  }
-
-  // Segment size in bytes, pro-rated by % deleted
-  private long size(SegmentInfoPerCommit info) throws IOException {
-    final long byteSize = info.sizeInBytes();    
-    final int delCount = writer.get().numDeletedDocs(info);
-    final double delRatio = (info.info.getDocCount() <= 0 ? 0.0f : ((double)delCount / (double)info.info.getDocCount()));    
-    assert delRatio <= 1.0;
-    return (long) (byteSize * (1.0-delRatio));
-  }
-
   private long floorSize(long bytes) {
     return Math.max(floorSegmentBytes, bytes);
   }
@@ -699,28 +626,8 @@
     sb.append("floorSegmentMB=").append(floorSegmentBytes/1024/1024.).append(", ");
     sb.append("forceMergeDeletesPctAllowed=").append(forceMergeDeletesPctAllowed).append(", ");
     sb.append("segmentsPerTier=").append(segsPerTier).append(", ");
-    sb.append("useCompoundFile=").append(useCompoundFile).append(", ");
     sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", ");
     sb.append("noCFSRatio=").append(noCFSRatio);
     return sb.toString();
   }
-
-  /** Returns the largest size allowed for a compound file segment */
-  public final double getMaxCFSSegmentSizeMB() {
-    return maxCFSSegmentSize/1024/1024.;
-  }
-
-  /** If a merged segment will be more than this value,
-   *  leave the segment as
-   *  non-compound file even if compound file is enabled.
-   *  Set this to Double.POSITIVE_INFINITY (default) and noCFSRatio to 1.0
-   *  to always use CFS regardless of merge size. */
-  public final TieredMergePolicy setMaxCFSSegmentSizeMB(double v) {
-    if (v < 0.0) {
-      throw new IllegalArgumentException("maxCFSSegmentSizeMB must be >=0 (got " + v + ")");
-    }
-    v *= 1024 * 1024;
-    this.maxCFSSegmentSize = (v > Long.MAX_VALUE) ? Long.MAX_VALUE : (long) v;
-    return this;
-  }
 }
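
With the useCompoundFile machinery deleted above, compound-file decisions live in the MergePolicy base class (the constructor now passes DEFAULT_NO_CFS_RATIO and MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE to super). A minimal usage sketch: setNoCFSRatio is confirmed by the javadoc reference above, while setMaxCFSSegmentSizeMB is assumed to have moved to the base class alongside getMaxCFSSegmentSizeMB, which toString() still calls:

```java
// Sketch: configuring CFS behavior through the inherited setters.
TieredMergePolicy tmp = new TieredMergePolicy(); // noCFSRatio starts at 0.1
tmp.setNoCFSRatio(1.0);            // consider CFS for every merge...
tmp.setMaxCFSSegmentSizeMB(512.0); // ...but never above 512 MB
```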
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index 4850b04..909cfe0 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -56,10 +56,10 @@
   private int docID = -1;
   private int freq;
 
-  private final Similarity.ExactSimScorer docScorer;
+  private final Similarity.SimScorer docScorer;
   
   ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
-                    Similarity.ExactSimScorer docScorer) throws IOException {
+                    Similarity.SimScorer docScorer) throws IOException {
     super(weight);
     this.docScorer = docScorer;
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCache.java b/lucene/core/src/java/org/apache/lucene/search/FieldCache.java
index 4a3100a..f87fb51 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCache.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldCache.java
@@ -49,34 +49,6 @@
  */
 public interface FieldCache {
 
-  /** Field values as 8-bit signed bytes */
-  public static abstract class Bytes {
-    /** Return a single Byte representation of this field's value. */
-    public abstract byte get(int docID);
-    
-    /** Zero value for every document */
-    public static final Bytes EMPTY = new Bytes() {
-      @Override
-      public byte get(int docID) {
-        return 0;
-      }
-    };
-  }
-
-  /** Field values as 16-bit signed shorts */
-  public static abstract class Shorts {
-    /** Return a short representation of this field's value. */
-    public abstract short get(int docID);
-    
-    /** Zero value for every document */
-    public static final Shorts EMPTY = new Shorts() {
-      @Override
-      public short get(int docID) {
-        return 0;
-      }
-    };
-  }
-
   /** Field values as 32-bit signed integers */
   public static abstract class Ints {
     /** Return an integer representation of this field's value. */
@@ -178,22 +150,6 @@
     public TermsEnum termsEnum(Terms terms) throws IOException;
   }
 
-  /** Interface to parse bytes from document fields.
-   * @see FieldCache#getBytes(AtomicReader, String, FieldCache.ByteParser, boolean)
-   */
-  public interface ByteParser extends Parser {
-    /** Return a single Byte representation of this field's value. */
-    public byte parseByte(BytesRef term);
-  }
-
-  /** Interface to parse shorts from document fields.
-   * @see FieldCache#getShorts(AtomicReader, String, FieldCache.ShortParser, boolean)
-   */
-  public interface ShortParser extends Parser {
-    /** Return a short representation of this field's value. */
-    public short parseShort(BytesRef term);
-  }
-
   /** Interface to parse ints from document fields.
    * @see FieldCache#getInts(AtomicReader, String, FieldCache.IntParser, boolean)
    */
@@ -229,135 +185,6 @@
   /** Expert: The cache used internally by sorting and range query classes. */
   public static FieldCache DEFAULT = new FieldCacheImpl();
 
-  /** The default parser for byte values, which are encoded by {@link Byte#toString(byte)} */
-  public static final ByteParser DEFAULT_BYTE_PARSER = new ByteParser() {
-    @Override
-    public byte parseByte(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // IntField, instead, which already decodes
-      // directly from byte[]
-      return Byte.parseByte(term.utf8ToString());
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_BYTE_PARSER"; 
-    }
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-  };
-
-  /** The default parser for short values, which are encoded by {@link Short#toString(short)} */
-  public static final ShortParser DEFAULT_SHORT_PARSER = new ShortParser() {
-    @Override
-    public short parseShort(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // IntField, instead, which already decodes
-      // directly from byte[]
-      return Short.parseShort(term.utf8ToString());
-    }
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_SHORT_PARSER"; 
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-  };
-
-  /** The default parser for int values, which are encoded by {@link Integer#toString(int)} */
-  public static final IntParser DEFAULT_INT_PARSER = new IntParser() {
-    @Override
-    public int parseInt(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // IntField, instead, which already decodes
-      // directly from byte[]
-      return Integer.parseInt(term.utf8ToString());
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_INT_PARSER"; 
-    }
-  };
-
-  /** The default parser for float values, which are encoded by {@link Float#toString(float)} */
-  public static final FloatParser DEFAULT_FLOAT_PARSER = new FloatParser() {
-    @Override
-    public float parseFloat(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // FloatField, instead, which already decodes
-      // directly from byte[]
-      return Float.parseFloat(term.utf8ToString());
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_FLOAT_PARSER"; 
-    }
-  };
-
-  /** The default parser for long values, which are encoded by {@link Long#toString(long)} */
-  public static final LongParser DEFAULT_LONG_PARSER = new LongParser() {
-    @Override
-    public long parseLong(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // LongField, instead, which already decodes
-      // directly from byte[]
-      return Long.parseLong(term.utf8ToString());
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_LONG_PARSER"; 
-    }
-  };
-
-  /** The default parser for double values, which are encoded by {@link Double#toString(double)} */
-  public static final DoubleParser DEFAULT_DOUBLE_PARSER = new DoubleParser() {
-    @Override
-    public double parseDouble(BytesRef term) {
-      // TODO: would be far better to directly parse from
-      // UTF8 bytes... but really users should use
-      // DoubleField, instead, which already decodes
-      // directly from byte[]
-      return Double.parseDouble(term.utf8ToString());
-    }
-    
-    @Override
-    public TermsEnum termsEnum(Terms terms) throws IOException {
-      return terms.iterator(null);
-    }
-    
-    @Override
-    public String toString() { 
-      return FieldCache.class.getName()+".DEFAULT_DOUBLE_PARSER"; 
-    }
-  };
-
   /**
    * A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
    * via {@link IntField}/{@link NumericTokenStream}.
@@ -450,60 +277,6 @@
   public Bits getDocsWithField(AtomicReader reader, String field) throws IOException;
 
   /** Checks the internal cache for an appropriate entry, and if none is
-   * found, reads the terms in <code>field</code> as a single byte and returns an array
-   * of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the single byte values.
-   * @param setDocsWithField  If true then {@link #getDocsWithField} will
-   *        also be computed and stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public Bytes getBytes(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /** Checks the internal cache for an appropriate entry, and if none is found,
-   * reads the terms in <code>field</code> as bytes and returns an array of
-   * size <code>reader.maxDoc()</code> of the value each document has in the
-   * given field.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the bytes.
-   * @param parser  Computes byte for string values.
-   * @param setDocsWithField  If true then {@link #getDocsWithField} will
-   *        also be computed and stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public Bytes getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField) throws IOException;
-
-  /** Checks the internal cache for an appropriate entry, and if none is
-   * found, reads the terms in <code>field</code> as shorts and returns an array
-   * of size <code>reader.maxDoc()</code> of the value each document
-   * has in the given field.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the shorts.
-   * @param setDocsWithField  If true then {@link #getDocsWithField} will
-   *        also be computed and stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public Shorts getShorts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException;
-
-  /** Checks the internal cache for an appropriate entry, and if none is found,
-   * reads the terms in <code>field</code> as shorts and returns an array of
-   * size <code>reader.maxDoc()</code> of the value each document has in the
-   * given field.
-   * @param reader  Used to get field values.
-   * @param field   Which field contains the shorts.
-   * @param parser  Computes short for string values.
-   * @param setDocsWithField  If true then {@link #getDocsWithField} will
-   *        also be computed and stored in the FieldCache.
-   * @return The values in the given field for each document.
-   * @throws IOException  If any error occurs.
-   */
-  public Shorts getShorts (AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField) throws IOException;
-  
-  /** Checks the internal cache for an appropriate entry, and if none is
    * found, reads the terms in <code>field</code> as integers and returns an array
    * of size <code>reader.maxDoc()</code> of the value each document
    * has in the given field.
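
For callers of the removed byte/short accessors, the surviving int accessor is the nearest replacement. A minimal migration sketch, assuming the field was indexed with IntField so the NumericUtils-based parser (now the only default path) can decode it; atomicReader and docID stand in for the caller's context:

```java
// Sketch: one 32-bit value per document; absent fields read as 0.
FieldCache.Ints values = FieldCache.DEFAULT.getInts(atomicReader, "quantity", false);
int v = values.get(docID);
```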
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
index b94ad6e..9b3d3a1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java
@@ -38,13 +38,13 @@
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FieldCacheSanityChecker;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.packed.GrowableWriter;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
@@ -62,8 +62,6 @@
 
   private synchronized void init() {
     caches = new HashMap<Class<?>,Cache>(9);
-    caches.put(Byte.TYPE, new ByteCache(this));
-    caches.put(Short.TYPE, new ShortCache(this));
     caches.put(Integer.TYPE, new IntCache(this));
     caches.put(Float.TYPE, new FloatCache(this));
     caches.put(Long.TYPE, new LongCache(this));
@@ -354,192 +352,6 @@
     }
     caches.get(DocsWithFieldCache.class).put(reader, new CacheKey(field, null), bits);
   }
-  
-  // inherit javadocs
-  public Bytes getBytes (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
-    return getBytes(reader, field, null, setDocsWithField);
-  }
-
-  // inherit javadocs
-  public Bytes getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
-      throws IOException {
-    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
-    if (valuesIn != null) {
-      // Not cached here by FieldCacheImpl (cached instead
-      // per-thread by SegmentReader):
-      return new Bytes() {
-        @Override
-        public byte get(int docID) {
-          return (byte) valuesIn.get(docID);
-        }
-      };
-    } else {
-      final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
-      if (info == null) {
-        return Bytes.EMPTY;
-      } else if (info.hasDocValues()) {
-        throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
-      } else if (!info.isIndexed()) {
-        return Bytes.EMPTY;
-      }
-      return (Bytes) caches.get(Byte.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
-    }
-  }
-
-  static class BytesFromArray extends Bytes {
-    private final byte[] values;
-
-    public BytesFromArray(byte[] values) {
-      this.values = values;
-    }
-    
-    @Override
-    public byte get(int docID) {
-      return values[docID];
-    }
-  }
-
-  static final class ByteCache extends Cache {
-    ByteCache(FieldCacheImpl wrapper) {
-      super(wrapper);
-    }
-
-    @Override
-    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
-        throws IOException {
-
-      int maxDoc = reader.maxDoc();
-      final byte[] values;
-      final ByteParser parser = (ByteParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = DEFAULT_SHORT_PARSER) so cache
-        // key includes DEFAULT_SHORT_PARSER:
-        return wrapper.getBytes(reader, key.field, DEFAULT_BYTE_PARSER, setDocsWithField);
-      }
-
-      values = new byte[maxDoc];
-
-      Uninvert u = new Uninvert() {
-          private byte currentValue;
-
-          @Override
-          public void visitTerm(BytesRef term) {
-            currentValue = parser.parseByte(term);
-          }
-
-          @Override
-          public void visitDoc(int docID) {
-            values[docID] = currentValue;
-          }
-
-          @Override
-          protected TermsEnum termsEnum(Terms terms) throws IOException {
-            return parser.termsEnum(terms);
-          }
-        };
-
-      u.uninvert(reader, key.field, setDocsWithField);
-
-      if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
-      }
-
-      return new BytesFromArray(values);
-    }
-  }
-  
-  // inherit javadocs
-  public Shorts getShorts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
-    return getShorts(reader, field, null, setDocsWithField);
-  }
-
-  // inherit javadocs
-  public Shorts getShorts(AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField)
-      throws IOException {
-    final NumericDocValues valuesIn = reader.getNumericDocValues(field);
-    if (valuesIn != null) {
-      // Not cached here by FieldCacheImpl (cached instead
-      // per-thread by SegmentReader):
-      return new Shorts() {
-        @Override
-        public short get(int docID) {
-          return (short) valuesIn.get(docID);
-        }
-      };
-    } else {
-      final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
-      if (info == null) {
-        return Shorts.EMPTY;
-      } else if (info.hasDocValues()) {
-        throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
-      } else if (!info.isIndexed()) {
-        return Shorts.EMPTY;
-      }
-      return (Shorts) caches.get(Short.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
-    }
-  }
-
-  static class ShortsFromArray extends Shorts {
-    private final short[] values;
-
-    public ShortsFromArray(short[] values) {
-      this.values = values;
-    }
-    
-    @Override
-    public short get(int docID) {
-      return values[docID];
-    }
-  }
-
-  static final class ShortCache extends Cache {
-    ShortCache(FieldCacheImpl wrapper) {
-      super(wrapper);
-    }
-
-    @Override
-    protected Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField)
-        throws IOException {
-
-      int maxDoc = reader.maxDoc();
-      final short[] values;
-      final ShortParser parser = (ShortParser) key.custom;
-      if (parser == null) {
-        // Confusing: must delegate to wrapper (vs simply
-        // setting parser = DEFAULT_SHORT_PARSER) so cache
-        // key includes DEFAULT_SHORT_PARSER:
-        return wrapper.getShorts(reader, key.field, DEFAULT_SHORT_PARSER, setDocsWithField);
-      }
-
-      values = new short[maxDoc];
-      Uninvert u = new Uninvert() {
-          private short currentValue;
-
-          @Override
-          public void visitTerm(BytesRef term) {
-            currentValue = parser.parseShort(term);
-          }
-
-          @Override
-          public void visitDoc(int docID) {
-            values[docID] = currentValue;
-          }
-          
-          @Override
-          protected TermsEnum termsEnum(Terms terms) throws IOException {
-            return parser.termsEnum(terms);
-          }
-        };
-
-      u.uninvert(reader, key.field, setDocsWithField);
-
-      if (setDocsWithField) {
-        wrapper.setDocsWithField(reader, key.field, u.docsWithField);
-      }
-      return new ShortsFromArray(values);
-    }
-  }
 
   // inherit javadocs
   public Ints getInts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
@@ -573,15 +385,19 @@
   }
 
   static class IntsFromArray extends Ints {
-    private final int[] values;
+    private final PackedInts.Reader values;
+    private final int minValue;
 
-    public IntsFromArray(int[] values) {
+    public IntsFromArray(PackedInts.Reader values, int minValue) {
+      assert values.getBitsPerValue() <= 32;
       this.values = values;
+      this.minValue = minValue;
     }
     
     @Override
     public int get(int docID) {
-      return values[docID];
+      final long delta = values.get(docID);
+      return minValue + (int) delta;
     }
   }
 
@@ -597,6 +413,15 @@
     }
   }
 
+  private static class GrowableWriterAndMinValue {
+    GrowableWriterAndMinValue(GrowableWriter array, long minValue) {
+      this.writer = array;
+      this.minValue = minValue;
+    }
+    public GrowableWriter writer;
+    public long minValue;
+  }
+
   static final class IntCache extends Cache {
     IntCache(FieldCacheImpl wrapper) {
       super(wrapper);
@@ -609,22 +434,17 @@
       final IntParser parser = (IntParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
-        // setting parser =
-        // DEFAULT_INT_PARSER/NUMERIC_UTILS_INT_PARSER) so
-        // cache key includes
-        // DEFAULT_INT_PARSER/NUMERIC_UTILS_INT_PARSER:
-        try {
-          return wrapper.getInts(reader, key.field, DEFAULT_INT_PARSER, setDocsWithField);
-        } catch (NumberFormatException ne) {
-          return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
-        }
+        // setting parser = NUMERIC_UTILS_INT_PARSER) so
+        // cache key includes NUMERIC_UTILS_INT_PARSER:
+        return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
       }
 
-      final HoldsOneThing<int[]> valuesRef = new HoldsOneThing<int[]>();
+      final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<GrowableWriterAndMinValue>();
 
       Uninvert u = new Uninvert() {
+          private int minValue;
           private int currentValue;
-          private int[] values;
+          private GrowableWriter values;
 
           @Override
           public void visitTerm(BytesRef term) {
@@ -634,16 +454,28 @@
               // (which will hit a NumberFormatException
               // when we first try the DEFAULT_INT_PARSER),
               // we don't double-alloc:
-              values = new int[reader.maxDoc()];
-              valuesRef.set(values);
+              int startBitsPerValue;
+              // Make sure that missing values (0) can be stored without resizing
+              if (currentValue < 0) {
+                minValue = currentValue;
+                startBitsPerValue = PackedInts.bitsRequired((-minValue) & 0xFFFFFFFFL);
+              } else {
+                minValue = 0;
+                startBitsPerValue = PackedInts.bitsRequired(currentValue);
+              }
+              values = new GrowableWriter(startBitsPerValue, reader.maxDoc(), PackedInts.FAST);
+              if (minValue != 0) {
+                values.fill(0, values.size(), (-minValue) & 0xFFFFFFFFL); // default value must be 0
+              }
+              valuesRef.set(new GrowableWriterAndMinValue(values, minValue));
             }
           }
 
           @Override
           public void visitDoc(int docID) {
-            values[docID] = currentValue;
+            values.set(docID, (currentValue - minValue) & 0xFFFFFFFFL);
           }
-          
+
           @Override
           protected TermsEnum termsEnum(Terms terms) throws IOException {
             return parser.termsEnum(terms);
@@ -655,11 +487,11 @@
       if (setDocsWithField) {
         wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
-      int[] values = valuesRef.get();
+      GrowableWriterAndMinValue values = valuesRef.get();
       if (values == null) {
-        values = new int[reader.maxDoc()];
+        return new IntsFromArray(new PackedInts.NullReader(reader.maxDoc()), 0);
       }
-      return new IntsFromArray(values);
+      return new IntsFromArray(values.writer.getMutable(), (int) values.minValue);
     }
   }
 
@@ -791,15 +623,9 @@
       final FloatParser parser = (FloatParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
-        // setting parser =
-        // DEFAULT_FLOAT_PARSER/NUMERIC_UTILS_FLOAT_PARSER) so
-        // cache key includes
-        // DEFAULT_FLOAT_PARSER/NUMERIC_UTILS_FLOAT_PARSER:
-        try {
-          return wrapper.getFloats(reader, key.field, DEFAULT_FLOAT_PARSER, setDocsWithField);
-        } catch (NumberFormatException ne) {
-          return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
-        }
+        // setting parser = NUMERIC_UTILS_FLOAT_PARSER) so
+        // cache key includes NUMERIC_UTILS_FLOAT_PARSER:
+        return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
       }
 
       final HoldsOneThing<float[]> valuesRef = new HoldsOneThing<float[]>();
@@ -878,15 +704,17 @@
   }
 
   static class LongsFromArray extends Longs {
-    private final long[] values;
+    private final PackedInts.Reader values;
+    private final long minValue;
 
-    public LongsFromArray(long[] values) {
+    public LongsFromArray(PackedInts.Reader values, long minValue) {
       this.values = values;
+      this.minValue = minValue;
     }
     
     @Override
     public long get(int docID) {
-      return values[docID];
+      return minValue + values.get(docID);
     }
   }
 
@@ -902,22 +730,17 @@
       final LongParser parser = (LongParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
-        // setting parser =
-        // DEFAULT_LONG_PARSER/NUMERIC_UTILS_LONG_PARSER) so
-        // cache key includes
-        // DEFAULT_LONG_PARSER/NUMERIC_UTILS_LONG_PARSER:
-        try {
-          return wrapper.getLongs(reader, key.field, DEFAULT_LONG_PARSER, setDocsWithField);
-        } catch (NumberFormatException ne) {
-          return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
-        }
+        // setting parser = NUMERIC_UTILS_LONG_PARSER) so
+        // cache key includes NUMERIC_UTILS_LONG_PARSER:
+        return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
       }
 
-      final HoldsOneThing<long[]> valuesRef = new HoldsOneThing<long[]>();
+      final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<GrowableWriterAndMinValue>();
 
       Uninvert u = new Uninvert() {
+          private long minValue;
           private long currentValue;
-          private long[] values;
+          private GrowableWriter values;
 
           @Override
           public void visitTerm(BytesRef term) {
@@ -927,14 +750,26 @@
               // (which will hit a NumberFormatException
               // when we first try the DEFAULT_INT_PARSER),
               // we don't double-alloc:
-              values = new long[reader.maxDoc()];
-              valuesRef.set(values);
+              int startBitsPerValue;
+              // Make sure that missing values (0) can be stored without resizing
+              if (currentValue < 0) {
+                minValue = currentValue;
+                startBitsPerValue = minValue == Long.MIN_VALUE ? 64 : PackedInts.bitsRequired(-minValue);
+              } else {
+                minValue = 0;
+                startBitsPerValue = PackedInts.bitsRequired(currentValue);
+              }
+              values = new GrowableWriter(startBitsPerValue, reader.maxDoc(), PackedInts.FAST);
+              if (minValue != 0) {
+                values.fill(0, values.size(), -minValue); // default value must be 0
+              }
+              valuesRef.set(new GrowableWriterAndMinValue(values, minValue));
             }
           }
 
           @Override
           public void visitDoc(int docID) {
-            values[docID] = currentValue;
+            values.set(docID, currentValue - minValue);
           }
           
           @Override
@@ -948,11 +783,11 @@
       if (setDocsWithField) {
         wrapper.setDocsWithField(reader, key.field, u.docsWithField);
       }
-      long[] values = valuesRef.get();
+      GrowableWriterAndMinValue values = valuesRef.get();
       if (values == null) {
-        values = new long[reader.maxDoc()];
+        return new LongsFromArray(new PackedInts.NullReader(reader.maxDoc()), 0L);
       }
-      return new LongsFromArray(values);
+      return new LongsFromArray(values.writer.getMutable(), values.minValue);
     }
   }
 
@@ -1013,15 +848,9 @@
       final DoubleParser parser = (DoubleParser) key.custom;
       if (parser == null) {
         // Confusing: must delegate to wrapper (vs simply
-        // setting parser =
-        // DEFAULT_DOUBLE_PARSER/NUMERIC_UTILS_DOUBLE_PARSER) so
-        // cache key includes
-        // DEFAULT_DOUBLE_PARSER/NUMERIC_UTILS_DOUBLE_PARSER:
-        try {
-          return wrapper.getDoubles(reader, key.field, DEFAULT_DOUBLE_PARSER, setDocsWithField);
-        } catch (NumberFormatException ne) {
-          return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
-        }
+        // setting parser = NUMERIC_UTILS_DOUBLE_PARSER) so
+        // cache key includes NUMERIC_UTILS_DOUBLE_PARSER:
+        return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
       }
 
       final HoldsOneThing<double[]> valuesRef = new HoldsOneThing<double[]>();
@@ -1069,11 +898,11 @@
 
   public static class SortedDocValuesImpl extends SortedDocValues {
     private final PagedBytes.Reader bytes;
-    private final PackedInts.Reader termOrdToBytesOffset;
+    private final MonotonicAppendingLongBuffer termOrdToBytesOffset;
     private final PackedInts.Reader docToTermOrd;
     private final int numOrd;
 
-    public SortedDocValuesImpl(PagedBytes.Reader bytes, PackedInts.Reader termOrdToBytesOffset, PackedInts.Reader docToTermOrd, int numOrd) {
+    public SortedDocValuesImpl(PagedBytes.Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, PackedInts.Reader docToTermOrd, int numOrd) {
       this.bytes = bytes;
       this.docToTermOrd = docToTermOrd;
       this.termOrdToBytesOffset = termOrdToBytesOffset;
@@ -1144,9 +973,7 @@
 
       final PagedBytes bytes = new PagedBytes(15);
 
-      int startBytesBPV;
       int startTermsBPV;
-      int startNumUniqueTerms;
 
       final int termCountHardLimit;
       if (maxDoc == Integer.MAX_VALUE) {
@@ -1169,22 +996,15 @@
             numUniqueTerms = termCountHardLimit;
           }
 
-          startBytesBPV = PackedInts.bitsRequired(numUniqueTerms*4);
           startTermsBPV = PackedInts.bitsRequired(numUniqueTerms);
-
-          startNumUniqueTerms = (int) numUniqueTerms;
         } else {
-          startBytesBPV = 1;
           startTermsBPV = 1;
-          startNumUniqueTerms = 1;
         }
       } else {
-        startBytesBPV = 1;
         startTermsBPV = 1;
-        startNumUniqueTerms = 1;
       }
 
-      GrowableWriter termOrdToBytesOffset = new GrowableWriter(startBytesBPV, 1+startNumUniqueTerms, acceptableOverheadRatio);
+      MonotonicAppendingLongBuffer termOrdToBytesOffset = new MonotonicAppendingLongBuffer();
       final GrowableWriter docToTermOrd = new GrowableWriter(startTermsBPV, maxDoc, acceptableOverheadRatio);
 
       int termOrd = 0;
@@ -1204,13 +1024,7 @@
             break;
           }
 
-          if (termOrd == termOrdToBytesOffset.size()) {
-            // NOTE: this code only runs if the incoming
-            // reader impl doesn't implement
-            // size (which should be uncommon)
-            termOrdToBytesOffset = termOrdToBytesOffset.resize(ArrayUtil.oversize(1+termOrd, 1));
-          }
-          termOrdToBytesOffset.set(termOrd, bytes.copyUsingLengthPrefix(term));
+          termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
           docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
           while (true) {
             final int docID = docs.nextDoc();
@@ -1222,14 +1036,10 @@
           }
           termOrd++;
         }
-
-        if (termOrdToBytesOffset.size() > termOrd) {
-          termOrdToBytesOffset = termOrdToBytesOffset.resize(termOrd);
-        }
       }
 
       // maybe an int-only impl?
-      return new SortedDocValuesImpl(bytes.freeze(true), termOrdToBytesOffset.getMutable(), docToTermOrd.getMutable(), termOrd);
+      return new SortedDocValuesImpl(bytes.freeze(true), termOrdToBytesOffset, docToTermOrd.getMutable(), termOrd);
     }
   }
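
The IntCache/LongCache rewrite above stores each value as an unsigned offset from the smallest value seen, in a packed writer, rather than in a plain array. A small arithmetic-only sketch of the round trip, including why documents left at the fill(..., -minValue) default decode back to 0:

```java
// Sketch of the delta encoding used by IntsFromArray/LongsFromArray above.
long minValue    = -7;                             // smallest value seen while uninverting
long slotDefault = (-minValue) & 0xFFFFFFFFL;      // what values.fill(...) writes everywhere
long slotFor42   = (42L - minValue) & 0xFFFFFFFFL; // what values.set(docID, ...) writes
assert (int) (minValue + slotDefault) == 0;        // missing doc still reads as 0
assert (int) (minValue + slotFor42) == 42;         // present doc round-trips exactly
```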
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
index 475a649..c262496 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
@@ -193,109 +193,7 @@
       }
     };
   }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,boolean)}. This works with all
-   * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Byte> newByteRange(String field, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
-    return newByteRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,FieldCache.ByteParser,boolean)}. This works with all
-   * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
-      @Override
-      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final byte inclusiveLowerPoint, inclusiveUpperPoint;
-        if (lowerVal != null) {
-          final byte i = lowerVal.byteValue();
-          if (!includeLower && i == Byte.MAX_VALUE)
-            return null;
-          inclusiveLowerPoint = (byte) (includeLower ?  i : (i + 1));
-        } else {
-          inclusiveLowerPoint = Byte.MIN_VALUE;
-        }
-        if (upperVal != null) {
-          final byte i = upperVal.byteValue();
-          if (!includeUpper && i == Byte.MIN_VALUE)
-            return null;
-          inclusiveUpperPoint = (byte) (includeUpper ? i : (i - 1));
-        } else {
-          inclusiveUpperPoint = Byte.MAX_VALUE;
-        }
-        
-        if (inclusiveLowerPoint > inclusiveUpperPoint)
-          return null;
-        
-        final FieldCache.Bytes values = FieldCache.DEFAULT.getBytes(context.reader(), field, (FieldCache.ByteParser) parser, false);
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
-          @Override
-          protected boolean matchDoc(int doc) {
-            final byte value = values.get(doc);
-            return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
-          }
-        };
-      }
-    };
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,boolean)}. This works with all
-   * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Short> newShortRange(String field, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
-    return newShortRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-  
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,FieldCache.ShortParser,boolean)}. This works with all
-   * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
-      @Override
-      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final short inclusiveLowerPoint, inclusiveUpperPoint;
-        if (lowerVal != null) {
-          short i = lowerVal.shortValue();
-          if (!includeLower && i == Short.MAX_VALUE)
-            return null;
-          inclusiveLowerPoint = (short) (includeLower ? i : (i + 1));
-        } else {
-          inclusiveLowerPoint = Short.MIN_VALUE;
-        }
-        if (upperVal != null) {
-          short i = upperVal.shortValue();
-          if (!includeUpper && i == Short.MIN_VALUE)
-            return null;
-          inclusiveUpperPoint = (short) (includeUpper ? i : (i - 1));
-        } else {
-          inclusiveUpperPoint = Short.MAX_VALUE;
-        }
-        
-        if (inclusiveLowerPoint > inclusiveUpperPoint)
-          return null;
-        
-        final FieldCache.Shorts values = FieldCache.DEFAULT.getShorts(context.reader(), field, (FieldCache.ShortParser) parser, false);
-        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
-          @Override
-          protected boolean matchDoc(int doc) {
-            final short value = values.get(doc);
-            return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
-          }
-        };
-      }
-    };
-  }
-  
+
   /**
    * Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,boolean)}. This works with all
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
@@ -521,7 +419,7 @@
   }
 
   @Override
-  @SuppressWarnings({"unchecked","rawtypes"})
+  @SuppressWarnings({"rawtypes"})
   public final boolean equals(Object o) {
     if (this == o) return true;
     if (!(o instanceof FieldCacheRangeFilter)) return false;
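
Callers of the deleted newByteRange/newShortRange factories migrate to the int variant whose javadoc survives just below the removed block. A minimal sketch, assuming the field carries NumericUtils-encoded int terms (e.g. indexed via IntField) and that newIntRange mirrors the removed signatures:

```java
// Sketch: half-open ranges still work by passing null for one bound.
Filter inStock = FieldCacheRangeFilter.newIntRange("qty", 1, null, true, true);
```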
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
index ee9668e..28e6144 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
@@ -22,12 +22,10 @@
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.FieldCache.ByteParser;
 import org.apache.lucene.search.FieldCache.DoubleParser;
 import org.apache.lucene.search.FieldCache.FloatParser;
 import org.apache.lucene.search.FieldCache.IntParser;
 import org.apache.lucene.search.FieldCache.LongParser;
-import org.apache.lucene.search.FieldCache.ShortParser;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
@@ -218,78 +216,6 @@
     }
   }
 
-  /** Parses field's values as byte (using {@link
-   *  FieldCache#getBytes} and sorts by ascending value */
-  public static final class ByteComparator extends NumericComparator<Byte> {
-    private final byte[] values;
-    private final ByteParser parser;
-    private FieldCache.Bytes currentReaderValues;
-    private byte bottom;
-
-    ByteComparator(int numHits, String field, FieldCache.Parser parser, Byte missingValue) {
-      super(field, missingValue);
-      values = new byte[numHits];
-      this.parser = (ByteParser) parser;
-    }
-
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Byte.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      byte v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Byte.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      byte v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-      values[slot] = v2;
-    }
-
-    @Override
-    public FieldComparator<Byte> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader(), field, parser, missingValue != null);
-      return super.setNextReader(context);
-    }
-    
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public Byte value(int slot) {
-      return Byte.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareDocToValue(int doc, Byte value) {
-      byte docValue = currentReaderValues.get(doc);
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Byte.compare(docValue, value.byteValue());
-    }
-  }
-
   /** Parses field's values as double (using {@link
    *  FieldCache#getDoubles} and sorts by ascending value */
   public static final class DoubleComparator extends NumericComparator<Double> {
@@ -439,80 +365,6 @@
     }
   }
 
-  /** Parses field's values as short (using {@link
-   *  FieldCache#getShorts} and sorts by ascending value */
-  public static final class ShortComparator extends NumericComparator<Short> {
-    private final short[] values;
-    private final ShortParser parser;
-    private FieldCache.Shorts currentReaderValues;
-    private short bottom;
-
-    ShortComparator(int numHits, String field, FieldCache.Parser parser, Short missingValue) {
-      super(field, missingValue);
-      values = new short[numHits];
-      this.parser = (ShortParser) parser;
-    }
-
-    @Override
-    public int compare(int slot1, int slot2) {
-      return Short.compare(values[slot1], values[slot2]);
-    }
-
-    @Override
-    public int compareBottom(int doc) {
-      short v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      return Short.compare(bottom, v2);
-    }
-
-    @Override
-    public void copy(int slot, int doc) {
-      short v2 = currentReaderValues.get(doc);
-      // Test for v2 == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {
-        v2 = missingValue;
-      }
-
-      values[slot] = v2;
-    }
-
-    @Override
-    public FieldComparator<Short> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader(), field, parser, missingValue != null);
-      return super.setNextReader(context);
-    }
-
-    @Override
-    public void setBottom(final int bottom) {
-      this.bottom = values[bottom];
-    }
-
-    @Override
-    public Short value(int slot) {
-      return Short.valueOf(values[slot]);
-    }
-
-    @Override
-    public int compareDocToValue(int doc, Short valueObj) {
-      final short value = valueObj.shortValue();
-      short docValue = currentReaderValues.get(doc);
-      // Test for docValue == 0 to save Bits.get method call for
-      // the common case (doc has value and value is non-zero):
-      if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
-        docValue = missingValue;
-      }
-      return Short.compare(docValue, value);
-    }
-  }
-
   /** Parses field's values as int (using {@link
    *  FieldCache#getInts} and sorts by ascending value */
   public static final class IntComparator extends NumericComparator<Integer> {
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index ce446a8..92837ec 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SloppySimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
@@ -245,14 +245,14 @@
       }
 
       if (slop == 0) {
-        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactSimScorer(stats, context));
+        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.simScorer(stats, context));
         if (s.noDocs) {
           return null;
         } else {
           return s;
         }
       } else {
-        return new SloppyPhraseScorer(this, postingsFreqs, slop, similarity.sloppySimScorer(stats, context));
+        return new SloppyPhraseScorer(this, postingsFreqs, slop, similarity.simScorer(stats, context));
       }
     }
 
@@ -263,7 +263,7 @@
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer)scorer).sloppyFreq();
-          SloppySimScorer docScorer = similarity.sloppySimScorer(stats, context);
+          SimScorer docScorer = similarity.simScorer(stats, context);
           ComplexExplanation result = new ComplexExplanation();
           result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
           Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index 0911af4..b48a1dc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -33,7 +33,7 @@
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SloppySimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
@@ -282,7 +282,7 @@
       }
 
       if (slop == 0) {  // optimize exact case
-        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactSimScorer(stats, context));
+        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.simScorer(stats, context));
         if (s.noDocs) {
           return null;
         } else {
@@ -290,7 +290,7 @@
         }
       } else {
         return
-          new SloppyPhraseScorer(this, postingsFreqs, slop, similarity.sloppySimScorer(stats, context));
+          new SloppyPhraseScorer(this, postingsFreqs, slop, similarity.simScorer(stats, context));
       }
     }
     
@@ -306,7 +306,7 @@
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer)scorer).sloppyFreq();
-          SloppySimScorer docScorer = similarity.sloppySimScorer(stats, context);
+          SimScorer docScorer = similarity.simScorer(stats, context);
           ComplexExplanation result = new ComplexExplanation();
           result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
           Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
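
The substitution repeated through MultiPhraseQuery and PhraseQuery above is the whole of this API change: a single SimScorer now serves both the exact and the sloppy path, with slop expressed through the freq passed in. A hedged sketch of the call shape; only simScorer(stats, context) appears in this diff, while score(doc, freq) is assumed from the surrounding 4.x Similarity API:

```java
Similarity.SimScorer scorer = similarity.simScorer(stats, context);
float exactScore  = scorer.score(doc, phraseFreq);  // slop == 0: raw phrase freq
float sloppyScore = scorer.score(doc, sloppyFreq);  // slop > 0: freq already slop-weighted
```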
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index a4ad72a..0667d8b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -34,7 +34,7 @@
 
   private float sloppyFreq; //phrase frequency in current doc as computed by phraseFreq().
 
-  private final Similarity.SloppySimScorer docScorer;
+  private final Similarity.SimScorer docScorer;
   
   private final int slop;
   private final int numPostings;
@@ -52,7 +52,7 @@
   private final long cost;
   
   SloppyPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
-      int slop, Similarity.SloppySimScorer docScorer) {
+      int slop, Similarity.SimScorer docScorer) {
     super(weight);
     this.docScorer = docScorer;
     this.slop = slop;
diff --git a/lucene/core/src/java/org/apache/lucene/search/SortField.java b/lucene/core/src/java/org/apache/lucene/search/SortField.java
index d2251d1..f7001bf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SortField.java
@@ -71,18 +71,10 @@
      * lower values are at the front. */
     DOUBLE,
 
-    /** Sort using term values as encoded Shorts.  Sort values are Short and
-     * lower values are at the front. */
-    SHORT,
-
     /** Sort using a custom Comparator.  Sort values are any Comparable and
      * sorting is done according to natural order. */
     CUSTOM,
 
-    /** Sort using term values as encoded Bytes.  Sort values are Byte and
-     * lower values are at the front. */
-    BYTE,
-
     /** Sort using term values as Strings, but comparing by
      * value (using String.compareTo) for all comparisons.
      * This is typically slower than {@link #STRING}, which
@@ -164,8 +156,6 @@
   public SortField(String field, FieldCache.Parser parser, boolean reverse) {
     if (parser instanceof FieldCache.IntParser) initFieldType(field, Type.INT);
     else if (parser instanceof FieldCache.FloatParser) initFieldType(field, Type.FLOAT);
-    else if (parser instanceof FieldCache.ShortParser) initFieldType(field, Type.SHORT);
-    else if (parser instanceof FieldCache.ByteParser) initFieldType(field, Type.BYTE);
     else if (parser instanceof FieldCache.LongParser) initFieldType(field, Type.LONG);
     else if (parser instanceof FieldCache.DoubleParser) initFieldType(field, Type.DOUBLE);
     else {
@@ -177,7 +167,7 @@
   }
   
   public SortField setMissingValue(Object missingValue) {
-    if (type != Type.BYTE && type != Type.SHORT && type != Type.INT && type != Type.FLOAT && type != Type.LONG && type != Type.DOUBLE) {
+    if (type != Type.INT && type != Type.FLOAT && type != Type.LONG && type != Type.DOUBLE) {
       throw new IllegalArgumentException( "Missing value only works for numeric types" );
     }
     this.missingValue = missingValue;
@@ -274,14 +264,6 @@
         buffer.append("<string_val" + ": \"").append(field).append("\">");
         break;
 
-      case BYTE:
-        buffer.append("<byte: \"").append(field).append("\">");
-        break;
-
-      case SHORT:
-        buffer.append("<short: \"").append(field).append("\">");
-        break;
-
       case INT:
         buffer.append("<int" + ": \"").append(field).append("\">");
         break;
@@ -389,12 +371,6 @@
     case DOUBLE:
       return new FieldComparator.DoubleComparator(numHits, field, parser, (Double) missingValue);
 
-    case BYTE:
-      return new FieldComparator.ByteComparator(numHits, field, parser, (Byte) missingValue);
-
-    case SHORT:
-      return new FieldComparator.ShortComparator(numHits, field, parser, (Short) missingValue);
-
     case CUSTOM:
       assert comparatorSource != null;
       return comparatorSource.newComparator(field, numHits, sortPos, reverse);
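
With Type.BYTE and Type.SHORT removed, fields previously sorted with those types sort via Type.INT instead. A minimal migration sketch (the "price" field name is hypothetical):

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    final class SortMigration {
      // Formerly: new SortField("price", SortField.Type.SHORT)
      // Small integral fields now sort with the INT type:
      static Sort byPrice() {
        return new Sort(new SortField("price", SortField.Type.INT));
      }
    }
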
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index fb5bfcc..099e90b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.ExactSimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
@@ -84,7 +84,7 @@
       }
       DocsEnum docs = termsEnum.docs(acceptDocs, null);
       assert docs != null;
-      return new TermScorer(this, docs, similarity.exactSimScorer(stats, context));
+      return new TermScorer(this, docs, similarity.simScorer(stats, context));
     }
     
     /**
@@ -116,7 +116,7 @@
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = scorer.freq();
-          ExactSimScorer docScorer = similarity.exactSimScorer(stats, context);
+          SimScorer docScorer = similarity.simScorer(stats, context);
           ComplexExplanation result = new ComplexExplanation();
           result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
           Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 7623914..6697524 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -26,7 +26,7 @@
  */
 final class TermScorer extends Scorer {
   private final DocsEnum docsEnum;
-  private final Similarity.ExactSimScorer docScorer;
+  private final Similarity.SimScorer docScorer;
   
   /**
    * Construct a <code>TermScorer</code>.
@@ -36,10 +36,10 @@
    * @param td
    *          An iterator over the documents matching the <code>Term</code>.
    * @param docScorer
-   *          The </code>Similarity.ExactSimScorer</code> implementation 
+   *          The <code>Similarity.SimScorer</code> implementation 
    *          to be used for score computations.
    */
-  TermScorer(Weight weight, DocsEnum td, Similarity.ExactSimScorer docScorer) {
+  TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer) {
     super(weight);
     this.docScorer = docScorer;
     this.docsEnum = td;
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 53ebf87..4be5eba 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -441,9 +441,8 @@
                   explain(AtomicReaderContext context, int doc)} &mdash; Provide a means for explaining why a given document was
                 scored the way it was.
                 Typically a weight such as TermWeight
-                that scores via a {@link org.apache.lucene.search.similarities.Similarity Similarity} will make use of the Similarity's implementations:
-                {@link org.apache.lucene.search.similarities.Similarity.ExactSimScorer#explain(int, Explanation) ExactSimScorer#explain(int doc, Explanation freq)},
-                and {@link org.apache.lucene.search.similarities.Similarity.SloppySimScorer#explain(int, Explanation) SloppySimScorer#explain(int doc, Explanation freq)}
+                that scores via a {@link org.apache.lucene.search.similarities.Similarity Similarity} will make use of the Similarity's implementation:
+                {@link org.apache.lucene.search.similarities.Similarity.SimScorer#explain(int, Explanation) SimScorer#explain(int doc, Explanation freq)}.
                 </li>
              </li>
         </ol>
@@ -468,7 +467,7 @@
                 {@link org.apache.lucene.search.Scorer#score score()} &mdash; Return the score of the
                 current document. This value can be determined in any appropriate way for an application. For instance, the
                 {@link org.apache.lucene.search.TermScorer TermScorer} simply defers to the configured Similarity:
-                {@link org.apache.lucene.search.similarities.Similarity.ExactSimScorer#score(int, int) ExactSimScorer.score(int doc, int freq)}.
+                {@link org.apache.lucene.search.similarities.Similarity.SimScorer#score(int, float) SimScorer.score(int doc, float freq)}.
             </li>
             <li>
                 {@link org.apache.lucene.search.Scorer#freq freq()} &mdash; Returns the number of matches
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index ef2c6e5..31034ea 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.similarities.Similarity.SloppySimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.spans.NearSpansOrdered;
 import org.apache.lucene.search.spans.NearSpansUnordered;
 import org.apache.lucene.search.spans.SpanNearQuery;
@@ -53,7 +53,7 @@
  * <p/>
  * Payload scores are aggregated using a pluggable {@link PayloadFunction}.
  * 
- * @see org.apache.lucene.search.similarities.Similarity.SloppySimScorer#computePayloadFactor(int, int, int, BytesRef)
+ * @see org.apache.lucene.search.similarities.Similarity.SimScorer#computePayloadFactor(int, int, int, BytesRef)
  */
 public class PayloadNearQuery extends SpanNearQuery {
   protected String fieldName;
@@ -151,7 +151,7 @@
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
         boolean topScorer, Bits acceptDocs) throws IOException {
       return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs, termContexts), this,
-          similarity, similarity.sloppySimScorer(stats, context));
+          similarity, similarity.simScorer(stats, context));
     }
     
     @Override
@@ -161,7 +161,7 @@
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = scorer.freq();
-          SloppySimScorer docScorer = similarity.sloppySimScorer(stats, context);
+          SimScorer docScorer = similarity.simScorer(stats, context);
           Explanation expl = new Explanation();
           expl.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
           Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
@@ -190,7 +190,7 @@
     private int payloadsSeen;
 
     protected PayloadNearSpanScorer(Spans spans, Weight weight,
-        Similarity similarity, Similarity.SloppySimScorer docScorer) throws IOException {
+        Similarity similarity, Similarity.SimScorer docScorer) throws IOException {
       super(spans, weight, docScorer);
       this.spans = spans;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index cab55df..b263999 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.similarities.Similarity.SloppySimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.TermSpans;
 import org.apache.lucene.search.spans.SpanTermQuery;
@@ -49,7 +49,7 @@
  * which returns 1 by default.
  * <p/>
  * Payload scores are aggregated using a pluggable {@link PayloadFunction}.
- * @see org.apache.lucene.search.similarities.Similarity.SloppySimScorer#computePayloadFactor(int, int, int, BytesRef)
+ * @see org.apache.lucene.search.similarities.Similarity.SimScorer#computePayloadFactor(int, int, int, BytesRef)
  **/
 public class PayloadTermQuery extends SpanTermQuery {
   protected PayloadFunction function;
@@ -82,7 +82,7 @@
     public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
         boolean topScorer, Bits acceptDocs) throws IOException {
       return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts),
-          this, similarity.sloppySimScorer(stats, context));
+          this, similarity.simScorer(stats, context));
     }
 
     protected class PayloadTermSpanScorer extends SpanScorer {
@@ -91,7 +91,7 @@
       protected int payloadsSeen;
       private final TermSpans termSpans;
 
-      public PayloadTermSpanScorer(TermSpans spans, Weight weight, Similarity.SloppySimScorer docScorer) throws IOException {
+      public PayloadTermSpanScorer(TermSpans spans, Weight weight, Similarity.SimScorer docScorer) throws IOException {
         super(spans, weight, docScorer);
         termSpans = spans;
       }
@@ -182,7 +182,7 @@
         int newDoc = scorer.advance(doc);
         if (newDoc == doc) {
           float freq = scorer.sloppyFreq();
-          SloppySimScorer docScorer = similarity.sloppySimScorer(stats, context);
+          SimScorer docScorer = similarity.simScorer(stats, context);
           Explanation expl = new Explanation();
           expl.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
           Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java
index e612aa4..d062015 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java
@@ -212,80 +212,18 @@
   }
 
   @Override
-  public final ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
+  public final SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
     BM25Stats bm25stats = (BM25Stats) stats;
-    final NumericDocValues norms = context.reader().getNormValues(bm25stats.field);
-    return norms == null 
-      ? new ExactBM25DocScorerNoNorms(bm25stats)
-      : new ExactBM25DocScorer(bm25stats, norms);
-  }
-
-  @Override
-  public final SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-    BM25Stats bm25stats = (BM25Stats) stats;
-    return new SloppyBM25DocScorer(bm25stats, context.reader().getNormValues(bm25stats.field));
+    return new BM25DocScorer(bm25stats, context.reader().getNormValues(bm25stats.field));
   }
   
-  private class ExactBM25DocScorer extends ExactSimScorer {
-    private final BM25Stats stats;
-    private final float weightValue;
-    private final NumericDocValues norms;
-    private final float[] cache;
-    
-    ExactBM25DocScorer(BM25Stats stats, NumericDocValues norms) throws IOException {
-      assert norms != null;
-      this.stats = stats;
-      this.weightValue = stats.weight * (k1 + 1); // boost * idf * (k1 + 1)
-      this.cache = stats.cache;
-      this.norms = norms;
-    }
-    
-    @Override
-    public float score(int doc, int freq) {
-      return weightValue * freq / (freq + cache[(byte)norms.get(doc) & 0xFF]);
-    }
-    
-    @Override
-    public Explanation explain(int doc, Explanation freq) {
-      return explainScore(doc, freq, stats, norms);
-    }
-  }
-  
-  /** there are no norms, we act as if b=0 */
-  private class ExactBM25DocScorerNoNorms extends ExactSimScorer {
-    private final BM25Stats stats;
-    private final float weightValue;
-    private static final int SCORE_CACHE_SIZE = 32;
-    private float[] scoreCache = new float[SCORE_CACHE_SIZE];
-
-    ExactBM25DocScorerNoNorms(BM25Stats stats) {
-      this.stats = stats;
-      this.weightValue = stats.weight * (k1 + 1); // boost * idf * (k1 + 1)
-      for (int i = 0; i < SCORE_CACHE_SIZE; i++)
-        scoreCache[i] = weightValue * i / (i + k1);
-    }
-    
-    @Override
-    public float score(int doc, int freq) {
-      // TODO: maybe score cache is more trouble than its worth?
-      return freq < SCORE_CACHE_SIZE        // check cache
-        ? scoreCache[freq]                  // cache hit
-        : weightValue * freq / (freq + k1); // cache miss
-    }
-    
-    @Override
-    public Explanation explain(int doc, Explanation freq) {
-      return explainScore(doc, freq, stats, null);
-    }
-  }
-  
-  private class SloppyBM25DocScorer extends SloppySimScorer {
+  private class BM25DocScorer extends SimScorer {
     private final BM25Stats stats;
     private final float weightValue; // boost * idf * (k1 + 1)
     private final NumericDocValues norms;
     private final float[] cache;
     
-    SloppyBM25DocScorer(BM25Stats stats, NumericDocValues norms) throws IOException {
+    BM25DocScorer(BM25Stats stats, NumericDocValues norms) throws IOException {
       this.stats = stats;
       this.weightValue = stats.weight * (k1 + 1);
       this.cache = stats.cache;
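
The three BM25 scorers (exact with norms, exact without, and sloppy) collapse into a single BM25DocScorer; its body is truncated above, so for reference here is a sketch of the unified score path, assuming the norm-byte cache layout of the removed ExactBM25DocScorer carries over unchanged:

    import org.apache.lucene.index.NumericDocValues;

    final class Bm25ScoreSketch {
      // weightValue = boost * idf * (k1 + 1); cache[b] holds the precomputed
      // length-normalization term for encoded norm byte b (assumption based
      // on the removed exact scorer above).
      static float score(int doc, float freq, float weightValue,
                         float[] cache, NumericDocValues norms) {
        return weightValue * freq / (freq + cache[(byte) norms.get(doc) & 0xFF]);
      }
    }
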
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/DefaultSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/DefaultSimilarity.java
index 355cafe..8f2b072 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/DefaultSimilarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/DefaultSimilarity.java
@@ -19,10 +19,40 @@
 
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.SmallFloat;
 
-/** Expert: Default scoring implementation. */
+/**
+ * Expert: Default scoring implementation which {@link #encodeNormValue(float)
+ * encodes} norm values as a single byte before being stored. At search time,
+ * the norm byte value is read from the index
+ * {@link org.apache.lucene.store.Directory directory} and
+ * {@link #decodeNormValue(long) decoded} back to a float <i>norm</i> value.
+ * This encoding/decoding, while reducing index size, comes with the price of
+ * precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>. For
+ * instance, <i>decode(encode(0.89)) = 0.75</i>.
+ * <p>
+ * Compression of norm values to a single byte saves memory at search time,
+ * because once a field is referenced at search time, its norms - for all
+ * documents - are maintained in memory.
+ * <p>
+ * The rationale supporting such lossy compression of norm values is that,
+ * given the difficulty (and inaccuracy) with which users express their true
+ * information need in a query, only big differences matter. <br>
+ * &nbsp;<br>
+ * Last, note that search time is too late to modify this <i>norm</i> part of
+ * scoring, e.g. by using a different {@link Similarity} for search.
+ */
 public class DefaultSimilarity extends TFIDFSimilarity {
   
+  /** Cache of decoded bytes. */
+  private static final float[] NORM_TABLE = new float[256];
+
+  static {
+    for (int i = 0; i < 256; i++) {
+      NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
+    }
+  }
+
   /** Sole constructor: parameter-free */
   public DefaultSimilarity() {}
   
@@ -38,6 +68,35 @@
     return (float)(1.0 / Math.sqrt(sumOfSquaredWeights));
   }
   
+  /**
+   * Encodes a normalization factor for storage in an index.
+   * <p>
+   * The encoding uses a three-bit mantissa, a five-bit exponent, and the
+   * zero-exponent point at 15, thus representing values from around 7x10^9 to
+   * 2x10^-9 with about one significant decimal digit of accuracy. Zero is also
+   * represented. Negative numbers are rounded up to zero. Values too large to
+   * represent are rounded down to the largest representable value. Positive
+   * values too small to represent are rounded up to the smallest positive
+   * representable value.
+   * 
+   * @see org.apache.lucene.document.Field#setBoost(float)
+   * @see org.apache.lucene.util.SmallFloat
+   */
+  @Override
+  public final long encodeNormValue(float f) {
+    return SmallFloat.floatToByte315(f);
+  }
+
+  /**
+   * Decodes the norm value, assuming it is a single byte.
+   * 
+   * @see #encodeNormValue(float)
+   */
+  @Override
+  public final float decodeNormValue(long norm) {
+    return NORM_TABLE[(int) (norm & 0xFF)];  // & 0xFF maps negative bytes to positive above 127
+  }
+
   /** Implemented as
    *  <code>state.getBoost()*lengthNorm(numTerms)</code>, where
    *  <code>numTerms</code> is {@link FieldInvertState#getLength()} if {@link
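
The decode(encode(0.89)) = 0.75 precision loss called out in the javadoc can be reproduced directly with SmallFloat, which backs this encoding; a quick check:

    import org.apache.lucene.util.SmallFloat;

    public class NormRoundTrip {
      public static void main(String[] args) {
        byte encoded = SmallFloat.floatToByte315(0.89f);    // 3-bit mantissa, 5-bit exponent
        float decoded = SmallFloat.byte315ToFloat(encoded); // lossy round trip
        System.out.println(decoded);                        // 0.75
      }
    }
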
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
index 28c6d80..507c568 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/MultiSimilarity.java
@@ -57,60 +57,25 @@
   }
 
   @Override
-  public ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-    ExactSimScorer subScorers[] = new ExactSimScorer[sims.length];
+  public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
+    SimScorer subScorers[] = new SimScorer[sims.length];
     for (int i = 0; i < subScorers.length; i++) {
-      subScorers[i] = sims[i].exactSimScorer(((MultiStats)stats).subStats[i], context);
+      subScorers[i] = sims[i].simScorer(((MultiStats)stats).subStats[i], context);
     }
-    return new MultiExactDocScorer(subScorers);
-  }
-
-  @Override
-  public SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-    SloppySimScorer subScorers[] = new SloppySimScorer[sims.length];
-    for (int i = 0; i < subScorers.length; i++) {
-      subScorers[i] = sims[i].sloppySimScorer(((MultiStats)stats).subStats[i], context);
-    }
-    return new MultiSloppyDocScorer(subScorers);
+    return new MultiSimScorer(subScorers);
   }
   
-  static class MultiExactDocScorer extends ExactSimScorer {
-    private final ExactSimScorer subScorers[];
+  static class MultiSimScorer extends SimScorer {
+    private final SimScorer subScorers[];
     
-    MultiExactDocScorer(ExactSimScorer subScorers[]) {
-      this.subScorers = subScorers;
-    }
-    
-    @Override
-    public float score(int doc, int freq) {
-      float sum = 0.0f;
-      for (ExactSimScorer subScorer : subScorers) {
-        sum += subScorer.score(doc, freq);
-      }
-      return sum;
-    }
-
-    @Override
-    public Explanation explain(int doc, Explanation freq) {
-      Explanation expl = new Explanation(score(doc, (int)freq.getValue()), "sum of:");
-      for (ExactSimScorer subScorer : subScorers) {
-        expl.addDetail(subScorer.explain(doc, freq));
-      }
-      return expl;
-    }
-  }
-  
-  static class MultiSloppyDocScorer extends SloppySimScorer {
-    private final SloppySimScorer subScorers[];
-    
-    MultiSloppyDocScorer(SloppySimScorer subScorers[]) {
+    MultiSimScorer(SimScorer subScorers[]) {
       this.subScorers = subScorers;
     }
     
     @Override
     public float score(int doc, float freq) {
       float sum = 0.0f;
-      for (SloppySimScorer subScorer : subScorers) {
+      for (SimScorer subScorer : subScorers) {
         sum += subScorer.score(doc, freq);
       }
       return sum;
@@ -119,7 +84,7 @@
     @Override
     public Explanation explain(int doc, Explanation freq) {
       Explanation expl = new Explanation(score(doc, freq.getValue()), "sum of:");
-      for (SloppySimScorer subScorer : subScorers) {
+      for (SimScorer subScorer : subScorers) {
         expl.addDetail(subScorer.explain(doc, freq));
       }
       return expl;
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/PerFieldSimilarityWrapper.java b/lucene/core/src/java/org/apache/lucene/search/similarities/PerFieldSimilarityWrapper.java
index 7856be9..17a461e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/PerFieldSimilarityWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/PerFieldSimilarityWrapper.java
@@ -54,15 +54,9 @@
   }
 
   @Override
-  public final ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+  public final SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
     PerFieldSimWeight perFieldWeight = (PerFieldSimWeight) weight;
-    return perFieldWeight.delegate.exactSimScorer(perFieldWeight.delegateWeight, context);
-  }
-
-  @Override
-  public final SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-    PerFieldSimWeight perFieldWeight = (PerFieldSimWeight) weight;
-    return perFieldWeight.delegate.sloppySimScorer(perFieldWeight.delegateWeight, context);
+    return perFieldWeight.delegate.simScorer(perFieldWeight.delegateWeight, context);
   }
   
   /** 
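
Since the wrapper now delegates through a single simScorer method, a per-field policy only has to implement get(String); a minimal sketch with a hypothetical routing rule:

    import org.apache.lucene.search.similarities.BM25Similarity;
    import org.apache.lucene.search.similarities.DefaultSimilarity;
    import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
    import org.apache.lucene.search.similarities.Similarity;

    public class TitleBm25Wrapper extends PerFieldSimilarityWrapper {
      private final Similarity bm25 = new BM25Similarity();
      private final Similarity dflt = new DefaultSimilarity();

      @Override
      public Similarity get(String field) {
        // Hypothetical policy: BM25 for "title", the default elsewhere.
        return "title".equals(field) ? bm25 : dflt;
      }
    }
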
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index 16435e5..b4ff8bb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -88,10 +88,8 @@
  *       is called for each query leaf node, {@link Similarity#queryNorm(float)} is called for the top-level
  *       query, and finally {@link Similarity.SimWeight#normalize(float, float)} passes down the normalization value
  *       and any top-level boosts (e.g. from enclosing {@link BooleanQuery}s).
- *   <li>For each segment in the index, the Query creates a {@link #exactSimScorer(SimWeight, AtomicReaderContext)}
- *       (for queries with exact frequencies such as TermQuerys and exact PhraseQueries) or a 
- *       {@link #sloppySimScorer(SimWeight, AtomicReaderContext)} (for queries with sloppy frequencies such as
- *       SpanQuerys and sloppy PhraseQueries). The score() method is called for each matching document.
+ *   <li>For each segment in the index, the Query creates a {@link #simScorer(SimWeight, AtomicReaderContext)}.
+ *       The score() method is called for each matching document.
  * </ol>
  * <p>
  * <a name="explaintime"/>
@@ -166,76 +164,31 @@
    * @return SimWeight object with the information this Similarity needs to score a query.
    */
   public abstract SimWeight computeWeight(float queryBoost, CollectionStatistics collectionStats, TermStatistics... termStats);
-  
+
   /**
-   * Creates a new {@link Similarity.ExactSimScorer} to score matching documents from a segment of the inverted index.
-   * @param weight collection information from {@link #computeWeight(float, CollectionStatistics, TermStatistics...)}
-   * @param context segment of the inverted index to be scored.
-   * @return ExactSimScorer for scoring documents across <code>context</code>
-   * @throws IOException if there is a low-level I/O error
-   */
-  public abstract ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException;
-  
-  /**
-   * Creates a new {@link Similarity.SloppySimScorer} to score matching documents from a segment of the inverted index.
+   * Creates a new {@link Similarity.SimScorer} to score matching documents from a segment of the inverted index.
    * @param weight collection information from {@link #computeWeight(float, CollectionStatistics, TermStatistics...)}
    * @param context segment of the inverted index to be scored.
-   * @return SloppySimScorer for scoring documents across <code>context</code>
+   * @return SimScorer for scoring documents across <code>context</code>
    * @throws IOException if there is a low-level I/O error
    */
-  public abstract SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException;
+  public abstract SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException;
   
   /**
-   * API for scoring exact queries such as {@link TermQuery} and 
-   * exact {@link PhraseQuery}.
-   * <p>
-   * Frequencies are integers (the term or phrase frequency within the document)
-   */
-  public static abstract class ExactSimScorer {
-    
-    /**
-     * Sole constructor. (For invocation by subclass 
-     * constructors, typically implicit.)
-     */
-    public ExactSimScorer() {}
-
-    /**
-     * Score a single document
-     * @param doc document id
-     * @param freq term frequency
-     * @return document's score
-     */
-    public abstract float score(int doc, int freq);
-    
-    /**
-     * Explain the score for a single document
-     * @param doc document id
-     * @param freq Explanation of how the term frequency was computed
-     * @return document's score
-     */
-    public Explanation explain(int doc, Explanation freq) {
-      Explanation result = new Explanation(score(doc, (int)freq.getValue()), 
-          "score(doc=" + doc + ",freq=" + freq.getValue() +"), with freq of:");
-      result.addDetail(freq);
-      return result;
-    }
-  }
-  
-  /**
-   * API for scoring "sloppy" queries such as {@link SpanQuery} and 
-   * sloppy {@link PhraseQuery}.
+   * API for scoring "sloppy" queries such as {@link TermQuery},
+   * {@link SpanQuery}, and {@link PhraseQuery}.
    * <p>
    * Frequencies are floating-point values: an approximate 
    * within-document frequency adjusted for "sloppiness" by 
-   * {@link SloppySimScorer#computeSlopFactor(int)}.
+   * {@link SimScorer#computeSlopFactor(int)}.
    */
-  public static abstract class SloppySimScorer {
+  public static abstract class SimScorer {
     
     /**
      * Sole constructor. (For invocation by subclass 
      * constructors, typically implicit.)
      */
-    public SloppySimScorer() {}
+    public SimScorer() {}
 
     /**
      * Score a single document
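
Under the merged API every Similarity supplies one scorer whose score(int, float) handles exact and sloppy frequencies alike; a minimal illustrative SimScorer (not a real scoring model):

    import org.apache.lucene.search.similarities.Similarity.SimScorer;
    import org.apache.lucene.util.BytesRef;

    final class RawFreqSimScorer extends SimScorer {
      @Override
      public float score(int doc, float freq) {
        return freq; // TermQuery passes an integral freq, phrase/span queries a sloppy one
      }

      @Override
      public float computeSlopFactor(int distance) {
        return 1.0f / (distance + 1); // standard edit-distance damping
      }

      @Override
      public float computePayloadFactor(int doc, int start, int end, BytesRef payload) {
        return 1.0f; // payloads ignored in this sketch
      }
    }
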
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/SimilarityBase.java b/lucene/core/src/java/org/apache/lucene/search/similarities/SimilarityBase.java
index 4f4f678..c1ccff4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/SimilarityBase.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/SimilarityBase.java
@@ -190,38 +190,20 @@
   }
   
   @Override
-  public ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
+  public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
     if (stats instanceof MultiSimilarity.MultiStats) {
       // a multi term query (e.g. phrase). return the summation, 
       // scoring almost as if it were boolean query
       SimWeight subStats[] = ((MultiSimilarity.MultiStats) stats).subStats;
-      ExactSimScorer subScorers[] = new ExactSimScorer[subStats.length];
+      SimScorer subScorers[] = new SimScorer[subStats.length];
       for (int i = 0; i < subScorers.length; i++) {
         BasicStats basicstats = (BasicStats) subStats[i];
-        subScorers[i] = new BasicExactDocScorer(basicstats, context.reader().getNormValues(basicstats.field));
+        subScorers[i] = new BasicSimScorer(basicstats, context.reader().getNormValues(basicstats.field));
       }
-      return new MultiSimilarity.MultiExactDocScorer(subScorers);
+      return new MultiSimilarity.MultiSimScorer(subScorers);
     } else {
       BasicStats basicstats = (BasicStats) stats;
-      return new BasicExactDocScorer(basicstats, context.reader().getNormValues(basicstats.field));
-    }
-  }
-  
-  @Override
-  public SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-    if (stats instanceof MultiSimilarity.MultiStats) {
-      // a multi term query (e.g. phrase). return the summation, 
-      // scoring almost as if it were boolean query
-      SimWeight subStats[] = ((MultiSimilarity.MultiStats) stats).subStats;
-      SloppySimScorer subScorers[] = new SloppySimScorer[subStats.length];
-      for (int i = 0; i < subScorers.length; i++) {
-        BasicStats basicstats = (BasicStats) subStats[i];
-        subScorers[i] = new BasicSloppyDocScorer(basicstats, context.reader().getNormValues(basicstats.field));
-      }
-      return new MultiSimilarity.MultiSloppyDocScorer(subScorers);
-    } else {
-      BasicStats basicstats = (BasicStats) stats;
-      return new BasicSloppyDocScorer(basicstats, context.reader().getNormValues(basicstats.field));
+      return new BasicSimScorer(basicstats, context.reader().getNormValues(basicstats.field));
     }
   }
   
@@ -277,46 +259,17 @@
   
   // --------------------------------- Classes ---------------------------------
   
-  /** Delegates the {@link #score(int, int)} and
-   * {@link #explain(int, Explanation)} methods to
-   * {@link SimilarityBase#score(BasicStats, float, float)} and
-   * {@link SimilarityBase#explain(BasicStats, int, Explanation, float)},
-   * respectively.
-   */
-  private class BasicExactDocScorer extends ExactSimScorer {
-    private final BasicStats stats;
-    private final NumericDocValues norms;
-    
-    BasicExactDocScorer(BasicStats stats, NumericDocValues norms) throws IOException {
-      this.stats = stats;
-      this.norms = norms;
-    }
-    
-    @Override
-    public float score(int doc, int freq) {
-      // We have to supply something in case norms are omitted
-      return SimilarityBase.this.score(stats, freq,
-          norms == null ? 1F : decodeNormValue((byte)norms.get(doc)));
-    }
-    
-    @Override
-    public Explanation explain(int doc, Explanation freq) {
-      return SimilarityBase.this.explain(stats, doc, freq,
-          norms == null ? 1F : decodeNormValue((byte)norms.get(doc)));
-    }
-  }
-  
   /** Delegates the {@link #score(int, float)} and
    * {@link #explain(int, Explanation)} methods to
    * {@link SimilarityBase#score(BasicStats, float, float)} and
    * {@link SimilarityBase#explain(BasicStats, int, Explanation, float)},
    * respectively.
    */
-  private class BasicSloppyDocScorer extends SloppySimScorer {
+  private class BasicSimScorer extends SimScorer {
     private final BasicStats stats;
     private final NumericDocValues norms;
     
-    BasicSloppyDocScorer(BasicStats stats, NumericDocValues norms) throws IOException {
+    BasicSimScorer(BasicStats stats, NumericDocValues norms) throws IOException {
       this.stats = stats;
       this.norms = norms;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
index 1a61477..1d5c9d3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java
@@ -28,7 +28,6 @@
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat;
 
 
 /**
@@ -496,27 +495,8 @@
  *          <td></td>
  *        </tr>
  *      </table>
- *      <br>&nbsp;<br>
- *      However the resulted <i>norm</i> value is {@link #encodeNormValue(float) encoded} as a single byte
- *      before being stored.
- *      At search time, the norm byte value is read from the index
- *      {@link org.apache.lucene.store.Directory directory} and
- *      {@link #decodeNormValue(byte) decoded} back to a float <i>norm</i> value.
- *      This encoding/decoding, while reducing index size, comes with the price of
- *      precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>.
- *      For instance, <i>decode(encode(0.89)) = 0.75</i>.
- *      <br>&nbsp;<br>
- *      Compression of norm values to a single byte saves memory at search time, 
- *      because once a field is referenced at search time, its norms - for 
- *      all documents - are maintained in memory.
- *      <br>&nbsp;<br>
- *      The rationale supporting such lossy compression of norm values is that
- *      given the difficulty (and inaccuracy) of users to express their true information
- *      need by a query, only big differences matter.
- *      <br>&nbsp;<br>
- *      Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by
- *      using a different {@link Similarity} for search.
- *      <br>&nbsp;<br>
+ *      Note that search time is too late to modify this <i>norm</i> part of scoring, 
+ *      e.g. by using a different {@link Similarity} for search.
  *    </li>
  * </ol>
  *
@@ -572,25 +552,6 @@
    * when <code>freq</code> is large, and smaller values when <code>freq</code>
    * is small.
    *
-   * <p>The default implementation calls {@link #tf(float)}.
-   *
-   * @param freq the frequency of a term within a document
-   * @return a score factor based on a term's within-document frequency
-   */
-  public float tf(int freq) {
-    return tf((float)freq);
-  }
-
-  /** Computes a score factor based on a term or phrase's frequency in a
-   * document.  This value is multiplied by the {@link #idf(long, long)}
-   * factor for each term in the query and these products are then summed to
-   * form the initial score for a document.
-   *
-   * <p>Terms and phrases repeated in a document indicate the topic of the
-   * document, so implementations of this method usually return larger values
-   * when <code>freq</code> is large, and smaller values when <code>freq</code>
-   * is small.
-   *
    * @param freq the frequency of a term within a document
    * @return a score factor based on a term's within-document frequency
    */
@@ -655,7 +616,7 @@
 
   /** Computes a score factor based on a term's document frequency (the number
    * of documents which contain the term).  This value is multiplied by the
-   * {@link #tf(int)} factor for each term in the query and these products are
+   * {@link #tf(float)} factor for each term in the query and these products are
    * then summed to form the initial score for a document.
    *
    * <p>Terms that occur in fewer documents are better indicators of topic, so
@@ -685,38 +646,15 @@
     return encodeNormValue(normValue);
   }
   
-  /** Cache of decoded bytes. */
-  private static final float[] NORM_TABLE = new float[256];
-
-  static {
-    for (int i = 0; i < 256; i++) {
-      NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
-    }
-  }
-
-  /** Decodes a normalization factor stored in an index.
+  /**
+   * Decodes a normalization factor stored in an index.
+   * 
    * @see #encodeNormValue(float)
    */
-  public float decodeNormValue(byte b) {
-    return NORM_TABLE[b & 0xFF];  // & 0xFF maps negative bytes to positive above 127
-  }
+  public abstract float decodeNormValue(long norm);
 
-  /** Encodes a normalization factor for storage in an index.
-  *
-  * <p>The encoding uses a three-bit mantissa, a five-bit exponent, and
-  * the zero-exponent point at 15, thus
-  * representing values from around 7x10^9 to 2x10^-9 with about one
-  * significant decimal digit of accuracy.  Zero is also represented.
-  * Negative numbers are rounded up to zero.  Values too large to represent
-  * are rounded down to the largest representable value.  Positive values too
-  * small to represent are rounded up to the smallest positive representable
-  * value.
-  * @see org.apache.lucene.document.Field#setBoost(float)
-  * @see org.apache.lucene.util.SmallFloat
-  */
-  public byte encodeNormValue(float f) {
-    return SmallFloat.floatToByte315(f);
-  }
+  /** Encodes a normalization factor for storage in an index. */
+  public abstract long encodeNormValue(float f);
  
   /** Computes the amount of a sloppy phrase match, based on an edit distance.
    * This value is summed for each sloppy phrase match in a document to form
@@ -755,49 +693,17 @@
   }
 
   @Override
-  public final ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
+  public final SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
     IDFStats idfstats = (IDFStats) stats;
-    return new ExactTFIDFDocScorer(idfstats, context.reader().getNormValues(idfstats.field));
-  }
-
-  @Override
-  public final SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-    IDFStats idfstats = (IDFStats) stats;
-    return new SloppyTFIDFDocScorer(idfstats, context.reader().getNormValues(idfstats.field));
+    return new TFIDFSimScorer(idfstats, context.reader().getNormValues(idfstats.field));
   }
   
-  // TODO: we can specialize these for omitNorms up front, but we should test that it doesn't confuse stupid hotspot.
-
-  private final class ExactTFIDFDocScorer extends ExactSimScorer {
+  private final class TFIDFSimScorer extends SimScorer {
     private final IDFStats stats;
     private final float weightValue;
     private final NumericDocValues norms;
     
-    ExactTFIDFDocScorer(IDFStats stats, NumericDocValues norms) throws IOException {
-      this.stats = stats;
-      this.weightValue = stats.value;
-      this.norms = norms; 
-    }
-    
-    @Override
-    public float score(int doc, int freq) {
-      final float raw = tf(freq)*weightValue;  // compute tf(f)*weight
-
-      return norms == null ? raw : raw * decodeNormValue((byte)norms.get(doc)); // normalize for field
-    }
-
-    @Override
-    public Explanation explain(int doc, Explanation freq) {
-      return explainScore(doc, freq, stats, norms);
-    }
-  }
-  
-  private final class SloppyTFIDFDocScorer extends SloppySimScorer {
-    private final IDFStats stats;
-    private final float weightValue;
-    private final NumericDocValues norms;
-    
-    SloppyTFIDFDocScorer(IDFStats stats, NumericDocValues norms) throws IOException {
+    TFIDFSimScorer(IDFStats stats, NumericDocValues norms) throws IOException {
       this.stats = stats;
       this.weightValue = stats.value;
       this.norms = norms;
@@ -807,7 +713,7 @@
     public float score(int doc, float freq) {
       final float raw = tf(freq) * weightValue; // compute tf(f)*weight
       
-      return norms == null ? raw : raw * decodeNormValue((byte)norms.get(doc));  // normalize for field
+      return norms == null ? raw : raw * decodeNormValue(norms.get(doc));  // normalize for field
     }
     
     @Override
@@ -894,8 +800,7 @@
     fieldExpl.addDetail(stats.idf);
 
     Explanation fieldNormExpl = new Explanation();
-    float fieldNorm =
-      norms!=null ? decodeNormValue((byte) norms.get(doc)) : 1.0f;
+    float fieldNorm = norms != null ? decodeNormValue(norms.get(doc)) : 1.0f;
     fieldNormExpl.setValue(fieldNorm);
     fieldNormExpl.setDescription("fieldNorm(doc="+doc+")");
     fieldExpl.addDetail(fieldNormExpl);
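
With encodeNormValue/decodeNormValue now abstract on TFIDFSimilarity (and widened to long), custom TFIDF subclasses must supply both codecs. A sketch that delegates to the same byte315 scheme DefaultSimilarity uses above (tf/idf/lengthNorm and the other abstract methods are omitted, hence the abstract class):

    import org.apache.lucene.search.similarities.TFIDFSimilarity;
    import org.apache.lucene.util.SmallFloat;

    public abstract class ByteNormTfidf extends TFIDFSimilarity {
      @Override
      public long encodeNormValue(float f) {
        return SmallFloat.floatToByte315(f);
      }

      @Override
      public float decodeNormValue(long norm) {
        // & 0xFF maps negative bytes to positive above 127, as in the default
        return SmallFloat.byte315ToFloat((byte) (norm & 0xFF));
      }
    }
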
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index a362763..74a098d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -34,9 +34,9 @@
   protected int doc;
   protected float freq;
   protected int numMatches;
-  protected final Similarity.SloppySimScorer docScorer;
+  protected final Similarity.SimScorer docScorer;
   
-  protected SpanScorer(Spans spans, Weight weight, Similarity.SloppySimScorer docScorer)
+  protected SpanScorer(Spans spans, Weight weight, Similarity.SimScorer docScorer)
   throws IOException {
     super(weight);
     this.docScorer = docScorer;
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 6057308..8e428f1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.similarities.Similarity.SloppySimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -86,7 +86,7 @@
     if (stats == null) {
       return null;
     } else {
-      return new SpanScorer(query.getSpans(context, acceptDocs, termContexts), this, similarity.sloppySimScorer(stats, context));
+      return new SpanScorer(query.getSpans(context, acceptDocs, termContexts), this, similarity.simScorer(stats, context));
     }
   }
 
@@ -97,7 +97,7 @@
       int newDoc = scorer.advance(doc);
       if (newDoc == doc) {
         float freq = scorer.sloppyFreq();
-        SloppySimScorer docScorer = similarity.sloppySimScorer(stats, context);
+        SimScorer docScorer = similarity.simScorer(stats, context);
         ComplexExplanation result = new ComplexExplanation();
         result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
         Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
diff --git a/lucene/core/src/java/org/apache/lucene/store/Directory.java b/lucene/core/src/java/org/apache/lucene/store/Directory.java
index ca9b3df..5404655 100644
--- a/lucene/core/src/java/org/apache/lucene/store/Directory.java
+++ b/lucene/core/src/java/org/apache/lucene/store/Directory.java
@@ -21,6 +21,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Closeable;
+import java.nio.file.NoSuchFileException;
 import java.util.Collection; // for javadocs
 
 import org.apache.lucene.util.IOUtils;
@@ -70,12 +71,12 @@
    * Returns the length of a file in the directory. This method follows the
    * following contract:
    * <ul>
-   * <li>Throws {@link FileNotFoundException} if the file does not exist
+   * <li>Throws {@link FileNotFoundException} or {@link NoSuchFileException}
+   * if the file does not exist.
    * <li>Returns a value &ge;0 if the file exists, which specifies its length.
    * </ul>
    * 
    * @param name the name of the file for which to return the length.
-   * @throws FileNotFoundException if the file does not exist.
    * @throws IOException if there was an IO error while retrieving the file's
    *         length.
    */
@@ -106,7 +107,9 @@
    * the only Directory implementations that respect this
    * parameter are {@link FSDirectory} and {@link
    * CompoundFileDirectory}.
-  */
+   * <p>Throws {@link FileNotFoundException} or {@link NoSuchFileException}
+   * if the file does not exist.
+   */
   public abstract IndexInput openInput(String name, IOContext context) throws IOException; 
   
   /** Construct a {@link Lock}.
@@ -223,6 +226,8 @@
    * efficiently open one or more sliced {@link IndexInput} instances from a
    * single file handle. The underlying file handle is kept open until the
    * {@link IndexInputSlicer} is closed.
+   * <p>Throws {@link FileNotFoundException} or {@link NoSuchFileException}
+   * if the file does not exist.
    *
    * @throws IOException
    *           if an {@link IOException} occurs
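
Callers that probe for a file's existence should now catch either exception type; for example:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.NoSuchFileException;
    import org.apache.lucene.store.Directory;

    final class DirUtil {
      /** Returns the file's length, or -1 if it does not exist (either exception flavor). */
      static long lengthIfExists(Directory dir, String name) throws IOException {
        try {
          return dir.fileLength(name);
        } catch (FileNotFoundException | NoSuchFileException e) {
          return -1;
        }
      }
    }
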
diff --git a/lucene/core/src/java/org/apache/lucene/util/Constants.java b/lucene/core/src/java/org/apache/lucene/util/Constants.java
index 1bbfb6a..44f5446 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Constants.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Constants.java
@@ -46,6 +46,8 @@
   public static final boolean SUN_OS = OS_NAME.startsWith("SunOS");
   /** True iff running on Mac OS X */
   public static final boolean MAC_OS_X = OS_NAME.startsWith("Mac OS X");
+  /** True iff running on FreeBSD */
+  public static final boolean FREE_BSD = OS_NAME.startsWith("FreeBSD");
 
   public static final String OS_ARCH = System.getProperty("os.arch");
   public static final String OS_VERSION = System.getProperty("os.version");
diff --git a/lucene/core/src/java/org/apache/lucene/util/RollingBuffer.java b/lucene/core/src/java/org/apache/lucene/util/RollingBuffer.java
index d31bb4c..4cf03f5 100644
--- a/lucene/core/src/java/org/apache/lucene/util/RollingBuffer.java
+++ b/lucene/core/src/java/org/apache/lucene/util/RollingBuffer.java
@@ -17,9 +17,6 @@
  * limitations under the License.
  */
 
-// TODO: probably move this to core at some point (eg,
-// cutover kuromoji, synfilter, LookaheadTokenFilter)
-
 /** Acts like forever growing T[], but internally uses a
  *  circular buffer to reuse instances of T.
  * 
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java b/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java
index 0e6adf6..d479cd7 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java
@@ -219,7 +219,7 @@
   /**
    * Returns the set of accepted strings, assuming that at most
    * <code>limit</code> strings are accepted. If more than <code>limit</code> 
-   * strings are accepted, null is returned. If <code>limit</code>&lt;0, then 
+   * strings are accepted, the first <code>limit</code> strings found are returned. If <code>limit</code>&lt;0, then 
    * the limit is infinite.
    */
   public static Set<IntsRef> getFiniteStrings(Automaton a, int limit) {
@@ -227,11 +227,9 @@
     if (a.isSingleton()) {
       if (limit > 0) {
         strings.add(Util.toUTF32(a.singleton, new IntsRef()));
-      } else {
-        return null;
       }
     } else if (!getFiniteStrings(a.initial, new HashSet<State>(), strings, new IntsRef(), limit)) {
-      return null;
+      return strings;
     }
     return strings;
   }
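
The new contract returns a partial set instead of null once the limit is hit; a quick sketch of the difference:

    import java.util.Set;
    import org.apache.lucene.util.IntsRef;
    import org.apache.lucene.util.automaton.Automaton;
    import org.apache.lucene.util.automaton.BasicAutomata;
    import org.apache.lucene.util.automaton.BasicOperations;
    import org.apache.lucene.util.automaton.SpecialOperations;

    final class FiniteStringsDemo {
      public static void main(String[] args) {
        Automaton a = BasicOperations.union(
            BasicAutomata.makeString("dog"), BasicAutomata.makeString("dogs"));
        // Two strings are accepted; with limit=1 this used to return null,
        // and per the new contract it returns the first string found.
        Set<IntsRef> strings = SpecialOperations.getFiniteStrings(a, 1);
        System.out.println(strings.size()); // 1
      }
    }
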
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java b/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java
index 7e09a42..7b6d787 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java
@@ -19,21 +19,21 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.util.packed.GrowableWriter;
 import org.apache.lucene.util.packed.PackedInts;
+import org.apache.lucene.util.packed.PagedGrowableWriter;
 
 // Used to dedup states (lookup already-frozen states)
 final class NodeHash<T> {
 
-  private GrowableWriter table;
-  private int count;
-  private int mask;
+  private PagedGrowableWriter table;
+  private long count;
+  private long mask;
   private final FST<T> fst;
   private final FST.Arc<T> scratchArc = new FST.Arc<T>();
   private final FST.BytesReader in;
 
   public NodeHash(FST<T> fst, FST.BytesReader in) {
-    table = new GrowableWriter(8, 16, PackedInts.COMPACT);
+    table = new PagedGrowableWriter(16, 1<<30, 8, PackedInts.COMPACT);
     mask = 15;
     this.fst = fst;
     this.in = in;
@@ -69,10 +69,10 @@
 
   // hash code for an unfrozen node.  This must be identical
   // to the un-frozen case (below)!!
-  private int hash(Builder.UnCompiledNode<T> node) {
+  private long hash(Builder.UnCompiledNode<T> node) {
     final int PRIME = 31;
     //System.out.println("hash unfrozen");
-    int h = 0;
+    long h = 0;
     // TODO: maybe if number of arcs is high we can safely subsample?
     for(int arcIdx=0;arcIdx<node.numArcs;arcIdx++) {
       final Builder.Arc<T> arc = node.arcs[arcIdx];
@@ -87,14 +87,14 @@
       }
     }
     //System.out.println("  ret " + (h&Integer.MAX_VALUE));
-    return h & Integer.MAX_VALUE;
+    return h & Long.MAX_VALUE;
   }
 
   // hash code for a frozen node
-  private int hash(long node) throws IOException {
+  private long hash(long node) throws IOException {
     final int PRIME = 31;
     //System.out.println("hash frozen node=" + node);
-    int h = 0;
+    long h = 0;
     fst.readFirstRealTargetArc(node, scratchArc, in);
     while(true) {
       //System.out.println("  label=" + scratchArc.label + " target=" + scratchArc.target + " h=" + h + " output=" + fst.outputs.outputToString(scratchArc.output) + " next?=" + scratchArc.flag(4) + " final?=" + scratchArc.isFinal() + " pos=" + in.getPosition());
@@ -111,13 +111,13 @@
       fst.readNextRealArc(scratchArc, in);
     }
     //System.out.println("  ret " + (h&Integer.MAX_VALUE));
-    return h & Integer.MAX_VALUE;
+    return h & Long.MAX_VALUE;
   }
 
   public long add(Builder.UnCompiledNode<T> nodeIn) throws IOException {
-    // System.out.println("hash: add count=" + count + " vs " + table.size());
-    final int h = hash(nodeIn);
-    int pos = h & mask;
+    //System.out.println("hash: add count=" + count + " vs " + table.size() + " mask=" + mask);
+    final long h = hash(nodeIn);
+    long pos = h & mask;
     int c = 0;
     while(true) {
       final long v = table.get(pos);
@@ -128,7 +128,8 @@
         assert hash(node) == h : "frozenHash=" + hash(node) + " vs h=" + h;
         count++;
         table.set(pos, node);
-        if (table.size() < 2*count) {
+        // Rehash at 2/3 occupancy:
+        if (count > 2*table.size()/3) {
           rehash();
         }
         return node;
@@ -144,7 +145,7 @@
 
   // called only by rehash
   private void addNew(long address) throws IOException {
-    int pos = hash(address) & mask;
+    long pos = hash(address) & mask;
     int c = 0;
     while(true) {
       if (table.get(pos) == 0) {
@@ -158,23 +159,15 @@
   }
 
   private void rehash() throws IOException {
-    final GrowableWriter oldTable = table;
+    final PagedGrowableWriter oldTable = table;
 
-    if (oldTable.size() >= Integer.MAX_VALUE/2) {
-      throw new IllegalStateException("FST too large (> 2.1 GB)");
-    }
-
-    table = new GrowableWriter(oldTable.getBitsPerValue(), 2*oldTable.size(), PackedInts.COMPACT);
+    table = new PagedGrowableWriter(2*oldTable.size(), 1<<30, PackedInts.bitsRequired(count), PackedInts.COMPACT);
     mask = table.size()-1;
-    for(int idx=0;idx<oldTable.size();idx++) {
+    for(long idx=0;idx<oldTable.size();idx++) {
       final long address = oldTable.get(idx);
       if (address != 0) {
         addNew(address);
       }
     }
   }
-
-  public int count() {
-    return count;
-  }
 }
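
The paged table lifts the old 2.1 GB ceiling: pages hold up to 1<<30 values and bits-per-value grows with the node count. A sketch of the rehash sizing used above:

    import org.apache.lucene.util.packed.PackedInts;
    import org.apache.lucene.util.packed.PagedGrowableWriter;

    final class NodeHashSizing {
      // Mirrors the rehash above: double the table, size bits to the current
      // node count, keep pages at 1<<30 values.
      static PagedGrowableWriter grow(PagedGrowableWriter oldTable, long count) {
        return new PagedGrowableWriter(2 * oldTable.size(), 1 << 30,
            PackedInts.bitsRequired(count), PackedInts.COMPACT);
      }
    }
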
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java b/lucene/core/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java
index 2460f25..d13648a 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/PositiveIntOutputs.java
@@ -33,26 +33,13 @@
   
   private final static Long NO_OUTPUT = new Long(0);
 
-  private final boolean doShare;
+  private final static PositiveIntOutputs singleton = new PositiveIntOutputs();
 
-  private final static PositiveIntOutputs singletonShare = new PositiveIntOutputs(true);
-  private final static PositiveIntOutputs singletonNoShare = new PositiveIntOutputs(false);
-
-  private PositiveIntOutputs(boolean doShare) {
-    this.doShare = doShare;
+  private PositiveIntOutputs() {
   }
 
-  /** Returns the instance of PositiveIntOutputs. */
   public static PositiveIntOutputs getSingleton() {
-    return getSingleton(true);
-  }
-
-  /** Expert: pass doShare=false to disable output sharing.
-   *  In some cases this may result in a smaller FST,
-   *  however it will also break methods like {@link
-   *  Util#getByOutput} and {@link Util#shortestPaths}. */
-  public static PositiveIntOutputs getSingleton(boolean doShare) {
-    return doShare ? singletonShare : singletonNoShare;
+    return singleton;
   }
 
   @Override
@@ -61,14 +48,10 @@
     assert valid(output2);
     if (output1 == NO_OUTPUT || output2 == NO_OUTPUT) {
       return NO_OUTPUT;
-    } else if (doShare) {
+    } else {
       assert output1 > 0;
       assert output2 > 0;
       return Math.min(output1, output2);
-    } else if (output1.equals(output2)) {
-      return output1;
-    } else {
-      return NO_OUTPUT;
     }
   }
 
@@ -134,6 +117,6 @@
 
   @Override
   public String toString() {
-    return "PositiveIntOutputs(doShare=" + doShare + ")";
+    return "PositiveIntOutputs";
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
index 26aa69a..2a56b40 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java
@@ -93,9 +93,7 @@
    *
    *  <p>NOTE: this only works with {@code FST<Long>}, only
    *  works when the outputs are ascending in order with
-   *  the inputs and only works when you shared
-   *  the outputs (pass doShare=true to {@link
-   *  PositiveIntOutputs#getSingleton}).
+   *  the inputs.
    *  For example, simple ordinals (0, 1,
-   *  2, ...), or file offets (when appending to a file)
+   *  2, ...), or file offsets (when appending to a file)
    *  fit this. */
@@ -517,11 +515,7 @@
   }
 
   /** Starting from node, find the top N min cost 
-   *  completions to a final node.
-   *
-   *  <p>NOTE: you must share the outputs when you build the
-   *  FST (pass doShare=true to {@link
-   *  PositiveIntOutputs#getSingleton}). */
+   *  completions to a final node. */
   public static <T> MinResult<T>[] shortestPaths(FST<T> fst, FST.Arc<T> fromNode, T startOutput, Comparator<T> comparator, int topN,
                                                  boolean allowEmptyString) throws IOException {
 
@@ -814,7 +808,7 @@
     final int charLimit = offset + length;
     while(charIdx < charLimit) {
       scratch.grow(intIdx+1);
-      final int utf32 = Character.codePointAt(s, charIdx);
+      final int utf32 = Character.codePointAt(s, charIdx, charLimit);
       scratch.ints[intIdx] = utf32;
       charIdx += Character.charCount(utf32);
       intIdx++;
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/package.html b/lucene/core/src/java/org/apache/lucene/util/fst/package.html
index 93c16e1..dfd42a3 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/package.html
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/package.html
@@ -43,7 +43,7 @@
     String inputValues[] = {"cat", "dog", "dogs"};
     long outputValues[] = {5, 7, 12};
     
-    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     Builder&lt;Long&gt; builder = new Builder&lt;Long&gt;(INPUT_TYPE.BYTE1, outputs);
     BytesRef scratchBytes = new BytesRef();
     IntsRef scratchInts = new IntsRef();
@@ -60,8 +60,7 @@
 </pre>
 Retrieval by value:
 <pre class="prettyprint">
-    // Only works because outputs are also in sorted order, and
-    // we passed 'true' for sharing to PositiveIntOutputs.getSingleton
+    // Only works because outputs are also in sorted order
     IntsRef key = Util.getByOutput(fst, 12);
     System.out.println(Util.toBytesRef(key, scratchBytes).utf8ToString()); // dogs
 </pre>
@@ -77,7 +76,6 @@
 </pre>
 N-shortest paths by weight:
 <pre class="prettyprint">
-    // Only works because we passed 'true' for sharing to PositiveIntOutputs.getSingleton
     Comparator&lt;Long&gt; comparator = new Comparator&lt;Long&gt;() {
       public int compare(Long left, Long right) {
         return left.compareTo(right);
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/AbstractAppendingLongBuffer.java b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractAppendingLongBuffer.java
index 087154d..78381e9 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/AbstractAppendingLongBuffer.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractAppendingLongBuffer.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
+
 import java.util.Arrays;
 
 import org.apache.lucene.util.ArrayUtil;
@@ -25,33 +27,37 @@
 /** Common functionality shared by {@link AppendingLongBuffer} and {@link MonotonicAppendingLongBuffer}. */
 abstract class AbstractAppendingLongBuffer {
 
-  static final int BLOCK_BITS = 10;
-  static final int MAX_PENDING_COUNT = 1 << BLOCK_BITS;
-  static final int BLOCK_MASK = MAX_PENDING_COUNT - 1;
+  static final int MIN_PAGE_SIZE = 64;
+  // More than 1M doesn't really make sense with these appending buffers
+  // since their goal is to try to have small numbers of bits per value
+  static final int MAX_PAGE_SIZE = 1 << 20;
 
+  final int pageShift, pageMask;
   long[] minValues;
   PackedInts.Reader[] deltas;
   private long deltasBytes;
   int valuesOff;
-  long[] pending;
+  final long[] pending;
   int pendingOff;
 
-  AbstractAppendingLongBuffer(int initialBlockCount) {
-    minValues = new long[16];
-    deltas = new PackedInts.Reader[16];
-    pending = new long[MAX_PENDING_COUNT];
+  AbstractAppendingLongBuffer(int initialBlockCount, int pageSize) {
+    minValues = new long[initialBlockCount];
+    deltas = new PackedInts.Reader[initialBlockCount];
+    pending = new long[pageSize];
+    pageShift = checkBlockSize(pageSize, MIN_PAGE_SIZE, MAX_PAGE_SIZE);
+    pageMask = pageSize - 1;
     valuesOff = 0;
     pendingOff = 0;
   }
 
   /** Get the number of values that have been added to the buffer. */
   public final long size() {
-    return valuesOff * (long) MAX_PENDING_COUNT + pendingOff;
+    return valuesOff * (long) pending.length + pendingOff;
   }
 
   /** Append a value to this buffer. */
   public final void add(long l) {
-    if (pendingOff == MAX_PENDING_COUNT) {
+    if (pendingOff == pending.length) {
       // check size
       if (deltas.length == valuesOff) {
         final int newLength = ArrayUtil.oversize(valuesOff + 1, 8);
@@ -80,8 +86,8 @@
     if (index < 0 || index >= size()) {
       throw new IndexOutOfBoundsException("" + index);
     }
-    int block = (int) (index >> BLOCK_BITS);
-    int element = (int) (index & BLOCK_MASK);
+    final int block = (int) (index >> pageShift);
+    final int element = (int) (index & pageMask);
     return get(block, element);
   }
 
@@ -99,7 +105,7 @@
       if (valuesOff == 0) {
         currentValues = pending;
       } else {
-        currentValues = new long[MAX_PENDING_COUNT];
+        currentValues = new long[pending.length];
         fillValues();
       }
     }
@@ -115,7 +121,7 @@
     public final long next() {
       assert hasNext();
       long result = currentValues[pOff++];
-      if (pOff == MAX_PENDING_COUNT) {
+      if (pOff == pending.length) {
         vOff += 1;
         pOff = 0;
         if (vOff <= valuesOff) {
@@ -139,6 +145,7 @@
   public long ramBytesUsed() {
     // TODO: this is called per-doc-per-norms/dv-field, can we optimize this?
     long bytesUsed = RamUsageEstimator.alignObjectSize(baseRamBytesUsed())
+        + 2 * RamUsageEstimator.NUM_BYTES_INT // pageShift, pageMask
         + RamUsageEstimator.NUM_BYTES_LONG // valuesBytes
         + RamUsageEstimator.sizeOf(pending)
         + RamUsageEstimator.sizeOf(minValues)
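
With the hard-coded BLOCK_BITS/BLOCK_MASK replaced by per-instance pageShift/pageMask, an absolute index splits into a page index and an in-page offset with one shift and one mask, which is why the page size must be a power of two. The arithmetic, as a standalone sketch:

    int pageSize = 1024;                                      // must be a power of two
    int pageShift = Integer.numberOfTrailingZeros(pageSize);  // 10
    int pageMask = pageSize - 1;                              // 0x3FF
    long index = 5000;
    int block = (int) (index >> pageShift);   // 4   -> which packed page
    int element = (int) (index & pageMask);   // 904 -> offset inside that page
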
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/AbstractBlockPackedWriter.java b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractBlockPackedWriter.java
index 6b16c86..67c8d4b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/AbstractBlockPackedWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractBlockPackedWriter.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
+
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -24,22 +26,11 @@
 
 abstract class AbstractBlockPackedWriter {
 
+  static final int MIN_BLOCK_SIZE = 64;
   static final int MAX_BLOCK_SIZE = 1 << (30 - 3);
   static final int MIN_VALUE_EQUALS_0 = 1 << 0;
   static final int BPV_SHIFT = 1;
 
-  static void checkBlockSize(int blockSize) {
-    if (blockSize <= 0 || blockSize > MAX_BLOCK_SIZE) {
-      throw new IllegalArgumentException("blockSize must be > 0 and < " + MAX_BLOCK_SIZE + ", got " + blockSize);
-    }
-    if (blockSize < 64) {
-      throw new IllegalArgumentException("blockSize must be >= 64, got " + blockSize);
-    }
-    if ((blockSize & (blockSize - 1)) != 0) {
-      throw new IllegalArgumentException("blockSize must be a power of two, got " + blockSize);
-    }
-  }
-
   static long zigZagEncode(long n) {
     return (n >> 63) ^ (n << 1);
   }
@@ -66,7 +57,7 @@
    * @param blockSize the number of values of a single block, must be a multiple of <tt>64</tt>
    */
   public AbstractBlockPackedWriter(DataOutput out, int blockSize) {
-    checkBlockSize(blockSize);
+    checkBlockSize(blockSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
     reset(out);
     values = new long[blockSize];
   }
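
zigZagEncode interleaves negative and non-negative longs (0, -1, 1, -2, ... map to 0, 1, 2, 3, ...) so that values close to zero need few bits regardless of sign; the matching decoder lives in BlockPackedReaderIterator. A quick check of the arithmetic:

    static long zigZagEncode(long n) { return (n >> 63) ^ (n << 1); }
    static long zigZagDecode(long n) { return (n >>> 1) ^ -(n & 1); }

    // zigZagEncode(-1) == 1, zigZagEncode(1) == 2, zigZagEncode(-64) == 127
    // zigZagDecode(zigZagEncode(n)) == n for every long n
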
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/AbstractPagedMutable.java b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractPagedMutable.java
new file mode 100644
index 0000000..62341f9
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/AbstractPagedMutable.java
@@ -0,0 +1,163 @@
+package org.apache.lucene.util.packed;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
+import static org.apache.lucene.util.packed.PackedInts.numBlocks;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+
+/**
+ * Base implementation for {@link PagedMutable} and {@link PagedGrowableWriter}.
+ * @lucene.internal
+ */
+abstract class AbstractPagedMutable<T extends AbstractPagedMutable<T>> {
+
+  static final int MIN_BLOCK_SIZE = 1 << 6;
+  static final int MAX_BLOCK_SIZE = 1 << 30;
+
+  final long size;
+  final int pageShift;
+  final int pageMask;
+  final PackedInts.Mutable[] subMutables;
+  final int bitsPerValue;
+
+  AbstractPagedMutable(int bitsPerValue, long size, int pageSize) {
+    this.bitsPerValue = bitsPerValue;
+    this.size = size;
+    pageShift = checkBlockSize(pageSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
+    pageMask = pageSize - 1;
+    final int numPages = numBlocks(size, pageSize);
+    subMutables = new PackedInts.Mutable[numPages];
+  }
+
+  protected final void fillPages() {
+    final int numPages = numBlocks(size, pageSize());
+    for (int i = 0; i < numPages; ++i) {
+      // do not allocate for more entries than necessary on the last page
+      final int valueCount = i == numPages - 1 ? lastPageSize(size) : pageSize();
+      subMutables[i] = newMutable(valueCount, bitsPerValue);
+    }
+  }
+
+  protected abstract PackedInts.Mutable newMutable(int valueCount, int bitsPerValue);
+
+  final int lastPageSize(long size) {
+    final int sz = indexInPage(size);
+    return sz == 0 ? pageSize() : sz;
+  }
+
+  final int pageSize() {
+    return pageMask + 1;
+  }
+
+  /** The number of values. */
+  public final long size() {
+    return size;
+  }
+
+  final int pageIndex(long index) {
+    return (int) (index >>> pageShift);
+  }
+
+  final int indexInPage(long index) {
+    return (int) index & pageMask;
+  }
+
+  /** Get value at <code>index</code>. */
+  public final long get(long index) {
+    assert index >= 0 && index < size;
+    final int pageIndex = pageIndex(index);
+    final int indexInPage = indexInPage(index);
+    return subMutables[pageIndex].get(indexInPage);
+  }
+
+  /** Set value at <code>index</code>. */
+  public final void set(long index, long value) {
+    assert index >= 0 && index < size;
+    final int pageIndex = pageIndex(index);
+    final int indexInPage = indexInPage(index);
+    subMutables[pageIndex].set(indexInPage, value);
+  }
+
+  protected long baseRamBytesUsed() {
+    return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+        + RamUsageEstimator.NUM_BYTES_OBJECT_REF
+        + RamUsageEstimator.NUM_BYTES_LONG
+        + 3 * RamUsageEstimator.NUM_BYTES_INT;
+  }
+
+  /** Return the number of bytes used by this object. */
+  public long ramBytesUsed() {
+    long bytesUsed = RamUsageEstimator.alignObjectSize(baseRamBytesUsed());
+    bytesUsed += RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF * subMutables.length);
+    for (PackedInts.Mutable gw : subMutables) {
+      bytesUsed += gw.ramBytesUsed();
+    }
+    return bytesUsed;
+  }
+
+  protected abstract T newUnfilledCopy(long newSize);
+
+  /** Create a new copy of size <code>newSize</code> based on the content of
+   *  this buffer. This method is much more efficient than creating a new
+   *  instance and copying values one by one. */
+  public final T resize(long newSize) {
+    final T copy = newUnfilledCopy(newSize);
+    final int numCommonPages = Math.min(copy.subMutables.length, subMutables.length);
+    final long[] copyBuffer = new long[1024];
+    for (int i = 0; i < copy.subMutables.length; ++i) {
+      final int valueCount = i == copy.subMutables.length - 1 ? lastPageSize(newSize) : pageSize();
+      final int bpv = i < numCommonPages ? subMutables[i].getBitsPerValue() : this.bitsPerValue;
+      copy.subMutables[i] = newMutable(valueCount, bpv);
+      if (i < numCommonPages) {
+        final int copyLength = Math.min(valueCount, subMutables[i].size());
+        PackedInts.copy(subMutables[i], 0, copy.subMutables[i], 0, copyLength, copyBuffer);
+      }
+    }
+    return copy;
+  }
+
+  /** Similar to {@link ArrayUtil#grow(long[], int)}. */
+  public final T grow(long minSize) {
+    assert minSize >= 0;
+    if (minSize <= size()) {
+      @SuppressWarnings("unchecked")
+      final T result = (T) this;
+      return result;
+    }
+    long extra = minSize >>> 3;
+    if (extra < 3) {
+      extra = 3;
+    }
+    final long newSize = minSize + extra;
+    return resize(newSize);
+  }
+
+  /** Similar to {@link ArrayUtil#grow(long[])}. */
+  public final T grow() {
+    return grow(size() + 1);
+  }
+
+  @Override
+  public final String toString() {
+    return getClass().getSimpleName() + "(size=" + size() + ",pageSize=" + pageSize() + ")";
+  }
+
+}
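
grow(minSize) over-allocates by an eighth of the requested size (with a small floor) so that repeated growth is amortized, mirroring ArrayUtil.grow. The resulting size for a given request, sketched standalone (grownSize is an illustrative name, not part of the class):

    static long grownSize(long minSize) {
      long extra = minSize >>> 3;   // grow by ~12.5%
      if (extra < 3) {
        extra = 3;                  // floor so tiny buffers still gain headroom
      }
      return minSize + extra;
    }
    // grownSize(8) == 11, grownSize(1024) == 1152
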
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/AppendingLongBuffer.java b/lucene/core/src/java/org/apache/lucene/util/packed/AppendingLongBuffer.java
index 978fc32..2c29729 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/AppendingLongBuffer.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/AppendingLongBuffer.java
@@ -27,9 +27,16 @@
  */
 public final class AppendingLongBuffer extends AbstractAppendingLongBuffer {
 
-  /** Sole constructor. */
+  /** @param initialPageCount the initial number of pages
+   *  @param pageSize         the size of a single page */
+  public AppendingLongBuffer(int initialPageCount, int pageSize) {
+    super(initialPageCount, pageSize);
+  }
+
+  /** Create an {@link AppendingLongBuffer} with initialPageCount=16 and
+   *  pageSize=1024. */
   public AppendingLongBuffer() {
-    super(16);
+    this(16, 1024);
   }
 
   @Override
@@ -43,8 +50,9 @@
     }
   }
 
+  @Override
   void packPendingValues() {
-    assert pendingOff == MAX_PENDING_COUNT;
+    assert pendingOff == pending.length;
 
     // compute max delta
     long minValue = pending[0];
@@ -71,6 +79,7 @@
   }
 
   /** Return an iterator over the values of this buffer. */
+  @Override
   public Iterator iterator() {
     return new Iterator();
   }
@@ -78,20 +87,21 @@
   /** A long iterator. */
   public final class Iterator extends AbstractAppendingLongBuffer.Iterator {
 
-    private Iterator() {
+    Iterator() {
       super();
     }
 
+    @Override
     void fillValues() {
       if (vOff == valuesOff) {
         currentValues = pending;
       } else if (deltas[vOff] == null) {
         Arrays.fill(currentValues, minValues[vOff]);
       } else {
-        for (int k = 0; k < MAX_PENDING_COUNT; ) {
-          k += deltas[vOff].get(k, currentValues, k, MAX_PENDING_COUNT - k);
+        for (int k = 0; k < pending.length; ) {
+          k += deltas[vOff].get(k, currentValues, k, pending.length - k);
         }
-        for (int k = 0; k < MAX_PENDING_COUNT; ++k) {
+        for (int k = 0; k < pending.length; ++k) {
           currentValues[k] += minValues[vOff];
         }
       }
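
With the page size now a constructor argument, typical usage looks like the following sketch (values are arbitrary; sequential iteration is cheaper than random get calls):

    AppendingLongBuffer buf = new AppendingLongBuffer(16, 1024); // initialPageCount, pageSize
    for (long i = 0; i < 10000; ++i) {
      buf.add(i * 3);              // each full page is packed with just enough bits
    }
    AppendingLongBuffer.Iterator it = buf.iterator();
    while (it.hasNext()) {
      long value = it.next();      // decodes page by page
    }
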
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReader.java b/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReader.java
index a33da95..ff35ec1 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReader.java
@@ -17,11 +17,14 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.BPV_SHIFT;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MAX_BLOCK_SIZE;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MIN_BLOCK_SIZE;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MIN_VALUE_EQUALS_0;
 import static org.apache.lucene.util.packed.BlockPackedReaderIterator.readVLong;
 import static org.apache.lucene.util.packed.BlockPackedReaderIterator.zigZagDecode;
-import static org.apache.lucene.util.packed.BlockPackedWriter.BPV_SHIFT;
-import static org.apache.lucene.util.packed.BlockPackedWriter.MIN_VALUE_EQUALS_0;
-import static org.apache.lucene.util.packed.BlockPackedWriter.checkBlockSize;
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
+import static org.apache.lucene.util.packed.PackedInts.numBlocks;
 
 import java.io.IOException;
 
@@ -40,14 +43,10 @@
 
   /** Sole constructor. */
   public BlockPackedReader(IndexInput in, int packedIntsVersion, int blockSize, long valueCount, boolean direct) throws IOException {
-    checkBlockSize(blockSize);
     this.valueCount = valueCount;
-    blockShift = Integer.numberOfTrailingZeros(blockSize);
+    blockShift = checkBlockSize(blockSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
     blockMask = blockSize - 1;
-    final int numBlocks = (int) (valueCount / blockSize) + (valueCount % blockSize == 0 ? 0 : 1);
-    if ((long) numBlocks * blockSize < valueCount) {
-      throw new IllegalArgumentException("valueCount is too large for this block size");
-    }
+    final int numBlocks = numBlocks(valueCount, blockSize);
     long[] minValues = null;
     subReaders = new PackedInts.Reader[numBlocks];
     for (int i = 0; i < numBlocks; ++i) {
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReaderIterator.java b/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReaderIterator.java
index 288518d..7d8bbd3 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReaderIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/BlockPackedReaderIterator.java
@@ -17,9 +17,13 @@
  * limitations under the License.
  */
 
-import static org.apache.lucene.util.packed.BlockPackedWriter.BPV_SHIFT;
-import static org.apache.lucene.util.packed.BlockPackedWriter.MIN_VALUE_EQUALS_0;
-import static org.apache.lucene.util.packed.BlockPackedWriter.checkBlockSize;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.BPV_SHIFT;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MAX_BLOCK_SIZE;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MIN_BLOCK_SIZE;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MIN_VALUE_EQUALS_0;
+import static org.apache.lucene.util.packed.BlockPackedReaderIterator.readVLong;
+import static org.apache.lucene.util.packed.BlockPackedReaderIterator.zigZagDecode;
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -87,7 +91,7 @@
    *                  been used to write the stream
    */
   public BlockPackedReaderIterator(DataInput in, int packedIntsVersion, int blockSize, long valueCount) {
-    checkBlockSize(blockSize);
+    checkBlockSize(blockSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
     this.packedIntsVersion = packedIntsVersion;
     this.blockSize = blockSize;
     this.values = new long[blockSize];
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/GrowableWriter.java b/lucene/core/src/java/org/apache/lucene/util/packed/GrowableWriter.java
index f7bc2dd..45b3338 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/GrowableWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/GrowableWriter.java
@@ -20,24 +20,35 @@
 import java.io.IOException;
 
 import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.RamUsageEstimator;
 
 /**     
  * Implements {@link PackedInts.Mutable}, but grows the
  * bit count of the underlying packed ints on-demand.
+ * <p>Beware that this class accepts negative values, but storing one grows
+ * the number of bits per value to 64.
  *
  * <p>@lucene.internal</p>
  */
-
 public class GrowableWriter implements PackedInts.Mutable {
 
-  private long currentMaxValue;
+  private long currentMask;
   private PackedInts.Mutable current;
   private final float acceptableOverheadRatio;
 
+  /**
+   * @param startBitsPerValue       the initial number of bits per value, may grow depending on the data
+   * @param valueCount              the number of values
+   * @param acceptableOverheadRatio an acceptable overhead ratio
+   */
   public GrowableWriter(int startBitsPerValue, int valueCount, float acceptableOverheadRatio) {
     this.acceptableOverheadRatio = acceptableOverheadRatio;
     current = PackedInts.getMutable(valueCount, startBitsPerValue, this.acceptableOverheadRatio);
-    currentMaxValue = PackedInts.maxValue(current.getBitsPerValue());
+    currentMask = mask(current.getBitsPerValue());
+  }
+
+  private static long mask(int bitsPerValue) {
+    return bitsPerValue == 64 ? ~0L : PackedInts.maxValue(bitsPerValue);
   }
 
   @Override
@@ -70,16 +81,16 @@
   }
 
   private void ensureCapacity(long value) {
-    assert value >= 0;
-    if (value <= currentMaxValue) {
+    if ((value & currentMask) == value) {
       return;
     }
-    final int bitsRequired = PackedInts.bitsRequired(value);
+    final int bitsRequired = value < 0 ? 64 : PackedInts.bitsRequired(value);
+    assert bitsRequired > current.getBitsPerValue();
     final int valueCount = size();
     PackedInts.Mutable next = PackedInts.getMutable(valueCount, bitsRequired, acceptableOverheadRatio);
     PackedInts.copy(current, 0, next, 0, valueCount, PackedInts.DEFAULT_BUFFER_SIZE);
     current = next;
-    currentMaxValue = PackedInts.maxValue(current.getBitsPerValue());
+    currentMask = mask(current.getBitsPerValue());
   }
 
   @Override
@@ -109,6 +120,10 @@
   public int set(int index, long[] arr, int off, int len) {
     long max = 0;
     for (int i = off, end = off + len; i < end; ++i) {
+      // bitwise or is nice because either all values are positive and the
+      // or-ed result will require as many bits per value as the max of the
+      // values, or one of them is negative and the result will be negative,
+      // forcing GrowableWriter to use 64 bits per value
       max |= arr[i];
     }
     ensureCapacity(max);
@@ -123,7 +138,12 @@
 
   @Override
   public long ramBytesUsed() {
-    return current.ramBytesUsed();
+    return RamUsageEstimator.alignObjectSize(
+        RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+        + RamUsageEstimator.NUM_BYTES_OBJECT_REF
+        + RamUsageEstimator.NUM_BYTES_LONG
+        + RamUsageEstimator.NUM_BYTES_FLOAT)
+        + current.ramBytesUsed();
   }
 
   @Override
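
Replacing currentMaxValue with a mask folds two checks into one: (value & currentMask) == value fails both when the value needs more bits than the current writer provides and when the value is negative (any mask narrower than 64 bits clears the sign bit). Observable behavior, as a sketch using the overhead constants from PackedInts:

    GrowableWriter w = new GrowableWriter(2, 100, PackedInts.DEFAULT); // start at 2 bits per value
    w.set(0, 3);    // fits in 2 bits, no reallocation
    w.set(1, 17);   // needs 5 bits -> storage is rebuilt wider
    w.set(2, -1);   // negative -> jumps straight to 64 bits per value
    assert w.get(2) == -1;
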
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicAppendingLongBuffer.java b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicAppendingLongBuffer.java
index 4b00994..abac58d 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicAppendingLongBuffer.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicAppendingLongBuffer.java
@@ -37,14 +37,22 @@
     return (n >> 63) ^ (n << 1);
   }
 
-  private float[] averages;
+  float[] averages;
 
-  /** Sole constructor. */
-  public MonotonicAppendingLongBuffer() {
-    super(16);
-    averages = new float[16];
+  /** @param initialPageCount the initial number of pages
+   *  @param pageSize         the size of a single page */
+  public MonotonicAppendingLongBuffer(int initialPageCount, int pageSize) {
+    super(initialPageCount, pageSize);
+    averages = new float[pending.length];
   }
-  
+
+  /** Create a {@link MonotonicAppendingLongBuffer} with initialPageCount=16
+   *  and pageSize=1024. */
+  public MonotonicAppendingLongBuffer() {
+    this(16, 1024);
+  }
+
+  @Override
   long get(int block, int element) {
     if (block == valuesOff) {
       return pending[element];
@@ -66,16 +74,16 @@
 
   @Override
   void packPendingValues() {
-    assert pendingOff == MAX_PENDING_COUNT;
+    assert pendingOff == pending.length;
 
     minValues[valuesOff] = pending[0];
-    averages[valuesOff] = (float) (pending[BLOCK_MASK] - pending[0]) / BLOCK_MASK;
+    averages[valuesOff] = (float) (pending[pending.length - 1] - pending[0]) / (pending.length - 1);
 
-    for (int i = 0; i < MAX_PENDING_COUNT; ++i) {
+    for (int i = 0; i < pending.length; ++i) {
       pending[i] = zigZagEncode(pending[i] - minValues[valuesOff] - (long) (averages[valuesOff] * (long) i));
     }
     long maxDelta = 0;
-    for (int i = 0; i < MAX_PENDING_COUNT; ++i) {
+    for (int i = 0; i < pending.length; ++i) {
       if (pending[i] < 0) {
         maxDelta = -1;
         break;
@@ -94,6 +102,7 @@
   }
 
   /** Return an iterator over the values of this buffer. */
+  @Override
   public Iterator iterator() {
     return new Iterator();
   }
@@ -105,18 +114,19 @@
       super();
     }
 
+    @Override
     void fillValues() {
       if (vOff == valuesOff) {
         currentValues = pending;
       } else if (deltas[vOff] == null) {
-        for (int k = 0; k < MAX_PENDING_COUNT; ++k) {
+        for (int k = 0; k < pending.length; ++k) {
           currentValues[k] = minValues[vOff] + (long) (averages[vOff] * (long) k);
         }
       } else {
-        for (int k = 0; k < MAX_PENDING_COUNT; ) {
-          k += deltas[vOff].get(k, currentValues, k, MAX_PENDING_COUNT - k);
+        for (int k = 0; k < pending.length; ) {
+          k += deltas[vOff].get(k, currentValues, k, pending.length - k);
         }
-        for (int k = 0; k < MAX_PENDING_COUNT; ++k) {
+        for (int k = 0; k < pending.length; ++k) {
           currentValues[k] = minValues[vOff] + (long) (averages[vOff] * (long) k) + zigZagDecode(currentValues[k]);
         }
       }
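
Each page now records its first value and the average slope across the page; entries are stored as the zig-zag-encoded deviation from the line minValue + average * i, so near-linear sequences compress to a couple of bits per value. Worked on a tiny page (pageSize 4, purely illustrative):

    long[] pending = {100, 102, 105, 106};
    long minValue = pending[0];                             // 100
    float average = (float) (pending[3] - pending[0]) / 3;  // 2.0
    // deviations from the fitted line: 0, 0, 1, 0
    // e.g. i == 2: 105 - 100 - (long) (average * 2) == 1 -> zigZagEncode(1) == 2
    // the max encoded delta is 2, so this page needs only 2 bits per value
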
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicBlockPackedReader.java b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicBlockPackedReader.java
index 27b14dd..f7f6e44 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicBlockPackedReader.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/MonotonicBlockPackedReader.java
@@ -17,8 +17,11 @@
  * limitations under the License.
  */
 
-import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.checkBlockSize;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MAX_BLOCK_SIZE;
+import static org.apache.lucene.util.packed.AbstractBlockPackedWriter.MIN_BLOCK_SIZE;
 import static org.apache.lucene.util.packed.BlockPackedReaderIterator.zigZagDecode;
+import static org.apache.lucene.util.packed.PackedInts.checkBlockSize;
+import static org.apache.lucene.util.packed.PackedInts.numBlocks;
 
 import java.io.IOException;
 
@@ -39,14 +42,10 @@
 
   /** Sole constructor. */
   public MonotonicBlockPackedReader(IndexInput in, int packedIntsVersion, int blockSize, long valueCount, boolean direct) throws IOException {
-    checkBlockSize(blockSize);
     this.valueCount = valueCount;
-    blockShift = Integer.numberOfTrailingZeros(blockSize);
+    blockShift = checkBlockSize(blockSize, MIN_BLOCK_SIZE, MAX_BLOCK_SIZE);
     blockMask = blockSize - 1;
-    final int numBlocks = (int) (valueCount / blockSize) + (valueCount % blockSize == 0 ? 0 : 1);
-    if ((long) numBlocks * blockSize < valueCount) {
-      throw new IllegalArgumentException("valueCount is too large for this block size");
-    }
+    final int numBlocks = numBlocks(valueCount, blockSize);
     minValues = new long[numBlocks];
     averages = new float[numBlocks];
     subReaders = new PackedInts.Reader[numBlocks];
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/PackedInts.java b/lucene/core/src/java/org/apache/lucene/util/packed/PackedInts.java
index b6db582..9a3b668 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/PackedInts.java
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/PackedInts.java
@@ -213,6 +213,11 @@
       this.format = format;
       this.bitsPerValue = bitsPerValue;
     }
+
+    @Override
+    public String toString() {
+      return "FormatAndBits(format=" + format + " bitsPerValue=" + bitsPerValue + ")";
+    }
   }
 
   /**
@@ -1036,14 +1041,21 @@
    */
   public static Mutable getMutable(int valueCount,
       int bitsPerValue, float acceptableOverheadRatio) {
-    assert valueCount >= 0;
-
     final FormatAndBits formatAndBits = fastestFormatAndBits(valueCount, bitsPerValue, acceptableOverheadRatio);
-    switch (formatAndBits.format) {
+    return getMutable(valueCount, formatAndBits.bitsPerValue, formatAndBits.format);
+  }
+
+  /** Same as {@link #getMutable(int, int, float)} with a pre-computed number
+   *  of bits per value and format.
+   *  @lucene.internal */
+  public static Mutable getMutable(int valueCount,
+      int bitsPerValue, PackedInts.Format format) {
+    assert valueCount >= 0;
+    switch (format) {
       case PACKED_SINGLE_BLOCK:
-        return Packed64SingleBlock.create(valueCount, formatAndBits.bitsPerValue);
+        return Packed64SingleBlock.create(valueCount, bitsPerValue);
       case PACKED:
-        switch (formatAndBits.bitsPerValue) {
+        switch (bitsPerValue) {
           case 8:
             return new Direct8(valueCount);
           case 16:
@@ -1063,7 +1075,7 @@
             }
             break;
         }
-        return new Packed64(valueCount, formatAndBits.bitsPerValue);
+        return new Packed64(valueCount, bitsPerValue);
       default:
         throw new AssertionError();
     }
@@ -1198,33 +1210,39 @@
       for (int i = 0; i < len; ++i) {
         dest.set(destPos++, src.get(srcPos++));
       }
-    } else {
+    } else if (len > 0) {
       // use bulk operations
-      long[] buf = new long[Math.min(capacity, len)];
-      int remaining = 0;
-      while (len > 0) {
-        final int read = src.get(srcPos, buf, remaining, Math.min(len, buf.length - remaining));
-        assert read > 0;
-        srcPos += read;
-        len -= read;
-        remaining += read;
-        final int written = dest.set(destPos, buf, 0, remaining);
-        assert written > 0;
-        destPos += written;
-        if (written < remaining) {
-          System.arraycopy(buf, written, buf, 0, remaining - written);
-        }
-        remaining -= written;
-      }
-      while (remaining > 0) {
-        final int written = dest.set(destPos, buf, 0, remaining);
-        destPos += written;
-        remaining -= written;
-        System.arraycopy(buf, written, buf, 0, remaining);
-      }
+      final long[] buf = new long[Math.min(capacity, len)];
+      copy(src, srcPos, dest, destPos, len, buf);
     }
   }
-  
+
+  /** Same as {@link #copy(Reader, int, Mutable, int, int, int)} but using a pre-allocated buffer. */
+  static void copy(Reader src, int srcPos, Mutable dest, int destPos, int len, long[] buf) {
+    assert buf.length > 0;
+    int remaining = 0;
+    while (len > 0) {
+      final int read = src.get(srcPos, buf, remaining, Math.min(len, buf.length - remaining));
+      assert read > 0;
+      srcPos += read;
+      len -= read;
+      remaining += read;
+      final int written = dest.set(destPos, buf, 0, remaining);
+      assert written > 0;
+      destPos += written;
+      if (written < remaining) {
+        System.arraycopy(buf, written, buf, 0, remaining - written);
+      }
+      remaining -= written;
+    }
+    while (remaining > 0) {
+      final int written = dest.set(destPos, buf, 0, remaining);
+      destPos += written;
+      remaining -= written;
+      System.arraycopy(buf, written, buf, 0, remaining);
+    }
+  }
+
   /**
    * Expert: reads only the metadata from a stream. This is useful to later
    * restore a stream or open a direct reader via 
@@ -1261,4 +1279,26 @@
     }    
   }
 
-}
\ No newline at end of file
+  /** Check that the block size is a power of 2, in the right bounds, and return
+   *  its log in base 2. */
+  static int checkBlockSize(int blockSize, int minBlockSize, int maxBlockSize) {
+    if (blockSize < minBlockSize || blockSize > maxBlockSize) {
+      throw new IllegalArgumentException("blockSize must be >= " + minBlockSize + " and <= " + maxBlockSize + ", got " + blockSize);
+    }
+    if ((blockSize & (blockSize - 1)) != 0) {
+      throw new IllegalArgumentException("blockSize must be a power of two, got " + blockSize);
+    }
+    return Integer.numberOfTrailingZeros(blockSize);
+  }
+
+  /** Return the number of blocks required to store <code>size</code> values
+   *  in blocks of <code>blockSize</code> values each. */
+  static int numBlocks(long size, int blockSize) {
+    final int numBlocks = (int) (size / blockSize) + (size % blockSize == 0 ? 0 : 1);
+    if ((long) numBlocks * blockSize < size) {
+      throw new IllegalArgumentException("size is too large for this block size");
+    }
+    return numBlocks;
+  }
+
+}
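
The two new helpers centralize what each reader and writer used to re-implement: checkBlockSize validates the range and the power-of-two constraint and returns log2(blockSize) so callers get their shift for free, while numBlocks is a ceiling division with an overflow guard. Both are package-private, so they are only callable from within org.apache.lucene.util.packed; the expected behavior, sketched:

    PackedInts.checkBlockSize(1024, 64, 1 << 30); // returns 10
    PackedInts.checkBlockSize(1000, 64, 1 << 30); // throws: not a power of two
    PackedInts.numBlocks(5000, 1024);             // returns 5, i.e. ceil(5000 / 1024)
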
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/PagedGrowableWriter.java b/lucene/core/src/java/org/apache/lucene/util/packed/PagedGrowableWriter.java
new file mode 100644
index 0000000..0563846
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/PagedGrowableWriter.java
@@ -0,0 +1,71 @@
+package org.apache.lucene.util.packed;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.PackedInts.Mutable;
+
+/**
+ * A {@link PagedGrowableWriter}. This class slices data into fixed-size blocks
+ * which have independent numbers of bits per value and grow on-demand.
+ * <p>You should use this class instead of {@link AppendingLongBuffer} only when
+ * you need random write-access. Otherwise this class will likely be slower and
+ * less memory-efficient.
+ * @lucene.internal
+ */
+public final class PagedGrowableWriter extends AbstractPagedMutable<PagedGrowableWriter> {
+
+  final float acceptableOverheadRatio;
+
+  /**
+   * Create a new {@link PagedGrowableWriter} instance.
+   *
+   * @param size the number of values to store.
+   * @param pageSize the number of values per page
+   * @param startBitsPerValue the initial number of bits per value
+   * @param acceptableOverheadRatio an acceptable overhead ratio
+   */
+  public PagedGrowableWriter(long size, int pageSize,
+      int startBitsPerValue, float acceptableOverheadRatio) {
+    this(size, pageSize, startBitsPerValue, acceptableOverheadRatio, true);
+  }
+
+  PagedGrowableWriter(long size, int pageSize, int startBitsPerValue, float acceptableOverheadRatio, boolean fillPages) {
+    super(startBitsPerValue, size, pageSize);
+    this.acceptableOverheadRatio = acceptableOverheadRatio;
+    if (fillPages) {
+      fillPages();
+    }
+  }
+
+  @Override
+  protected Mutable newMutable(int valueCount, int bitsPerValue) {
+    return new GrowableWriter(bitsPerValue, valueCount, acceptableOverheadRatio);
+  }
+
+  @Override
+  protected PagedGrowableWriter newUnfilledCopy(long newSize) {
+    return new PagedGrowableWriter(newSize, pageSize(), bitsPerValue, acceptableOverheadRatio, false);
+  }
+
+  @Override
+  protected long baseRamBytesUsed() {
+    return super.baseRamBytesUsed() + RamUsageEstimator.NUM_BYTES_FLOAT;
+  }
+
+}
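
A usage sketch (sizes and values are illustrative): only pages that receive large values widen, which is the point of backing each page with a GrowableWriter.

    PagedGrowableWriter writer = new PagedGrowableWriter(
        100_000_000L,         // size is a long, so more than 2B values are also legal
        1 << 10,              // pageSize
        1,                    // startBitsPerValue
        PackedInts.COMPACT);  // acceptableOverheadRatio
    writer.set(99_999_999L, 42);    // only the last page grows to 6 bits per value
    assert writer.get(99_999_999L) == 42;
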
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/PagedMutable.java b/lucene/core/src/java/org/apache/lucene/util/packed/PagedMutable.java
new file mode 100644
index 0000000..69e7619
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/PagedMutable.java
@@ -0,0 +1,71 @@
+package org.apache.lucene.util.packed;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.packed.PackedInts.Mutable;
+
+/**
+ * A {@link PagedMutable}. This class slices data into fixed-size blocks
+ * which have the same number of bits per value. It can be a useful replacement
+ * for {@link PackedInts.Mutable} to store more than 2B values.
+ * @lucene.internal
+ */
+public final class PagedMutable extends AbstractPagedMutable<PagedMutable> {
+
+  final PackedInts.Format format;
+
+  /**
+   * Create a new {@link PagedMutable} instance.
+   *
+   * @param size the number of values to store.
+   * @param pageSize the number of values per page
+   * @param bitsPerValue the number of bits per value
+   * @param acceptableOverheadRatio an acceptable overhead ratio
+   */
+  public PagedMutable(long size, int pageSize, int bitsPerValue, float acceptableOverheadRatio) {
+    this(size, pageSize, PackedInts.fastestFormatAndBits(pageSize, bitsPerValue, acceptableOverheadRatio));
+    fillPages();
+  }
+
+  PagedMutable(long size, int pageSize, PackedInts.FormatAndBits formatAndBits) {
+    this(size, pageSize, formatAndBits.bitsPerValue, formatAndBits.format);
+  }
+
+  PagedMutable(long size, int pageSize, int bitsPerValue, PackedInts.Format format) {
+    super(bitsPerValue, size, pageSize);
+    this.format = format;
+  }
+
+  @Override
+  protected Mutable newMutable(int valueCount, int bitsPerValue) {
+    assert this.bitsPerValue >= bitsPerValue;
+    return PackedInts.getMutable(valueCount, this.bitsPerValue, format);
+  }
+
+  @Override
+  protected PagedMutable newUnfilledCopy(long newSize) {
+    return new PagedMutable(newSize, pageSize(), bitsPerValue, format);
+  }
+
+  @Override
+  protected long baseRamBytesUsed() {
+    return super.baseRamBytesUsed() + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+  }
+
+}
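
By contrast with PagedGrowableWriter, the bits-per-value and storage format are fixed once in the constructor, so PagedMutable behaves like a long-indexed PackedInts.Mutable. A sketch with illustrative values:

    PagedMutable ords = new PagedMutable(
        1L << 20,           // number of values
        1 << 16,            // pageSize
        20,                 // bitsPerValue, fixed for the whole structure
        PackedInts.FAST);   // acceptableOverheadRatio
    ords.set(123456L, 999_999L);    // 999999 fits in 20 bits
    assert ords.get(123456L) == 999_999L;
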
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/package.html b/lucene/core/src/java/org/apache/lucene/util/packed/package.html
index 1696033..50470dd 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/package.html
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/package.html
@@ -47,6 +47,11 @@
        <li>Same as PackedInts.Mutable but grows the number of bits per value when needed.</li>
         <li>Useful to build a PackedInts.Mutable from a read-once stream of longs.</li>
     </ul></li>
+    <li><b>{@link org.apache.lucene.util.packed.PagedGrowableWriter}</b><ul>
+        <li>Slices data into fixed-size blocks stored in GrowableWriters.</li>
+        <li>Supports more than 2B values.</li>
+        <li>You should use AppendingLongBuffer instead if you don't need random write access.</li>
+    </ul></li>
     <li><b>{@link org.apache.lucene.util.packed.AppendingLongBuffer}</b><ul>
         <li>Can store any sequence of longs.</li>
         <li>Compression is good when values are close to each other.</li>
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearch.java b/lucene/core/src/test/org/apache/lucene/TestSearch.java
index afba51e..f2bedd4 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearch.java
@@ -112,10 +112,7 @@
       Analyzer analyzer = new MockAnalyzer(random);
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
       MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-      }
-      
+      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
       IndexWriter writer = new IndexWriter(directory, conf);
 
       String[] docs = {
@@ -130,7 +127,7 @@
       for (int j = 0; j < docs.length; j++) {
         Document d = new Document();
         d.add(newTextField("contents", docs[j], Field.Store.YES));
-        d.add(newStringField("id", ""+j, Field.Store.NO));
+        d.add(new IntField("id", j, Field.Store.NO));
         writer.addDocument(d);
       }
       writer.close();
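
The recurring test change above replaces the LogMergePolicy-specific setUseCompoundFile(boolean) with the MergePolicy-wide noCFSRatio: a merged segment is written as a compound file only if it is no larger than that fraction of the index, so 1.0 means always and 0.0 means never. The substitution pattern, as a sketch (configureCFS is a hypothetical helper, assuming org.apache.lucene.index.MergePolicy):

    static void configureCFS(MergePolicy mp, boolean useCompoundFile) {
      // 1.0: merged segments may always use the compound format; 0.0: never
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
    }
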
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index ff2f49c..3d942dc 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -72,9 +72,7 @@
       Analyzer analyzer = new MockAnalyzer(random);
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
       final MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFiles);
-      }
+      mp.setNoCFSRatio(useCompoundFiles ? 1.0 : 0.0);
       IndexWriter writer = new IndexWriter(directory, conf);
       if (VERBOSE) {
         System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
@@ -83,7 +81,7 @@
       for (int j = 0; j < MAX_DOCS; j++) {
         Document d = new Document();
         d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
-        d.add(newTextField(ID_FIELD, Integer.toString(j), Field.Store.YES));
+        d.add(new IntField(ID_FIELD, j, Field.Store.YES));
         writer.addDocument(d);
       }
       writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
index eae2f94..157b353 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
@@ -49,6 +49,7 @@
     iwConf.setCodec(CompressingCodec.randomInstance(random()));
     // disable CFS because this test checks file names
     iwConf.setMergePolicy(newLogMergePolicy(false));
+    iwConf.setUseCompoundFile(false);
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
 
     final Document validDoc = new Document();
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index ef3dc6d..d1eed42 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -58,7 +58,7 @@
   private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
       throws IOException {
     LogDocMergePolicy logByteSizeMergePolicy = new LogDocMergePolicy();
-    logByteSizeMergePolicy.setUseCompoundFile(false); // make sure we use plain
+    logByteSizeMergePolicy.setNoCFSRatio(0.0); // make sure we use plain
     // files
     conf.setMergePolicy(logByteSizeMergePolicy);
 
@@ -146,7 +146,7 @@
 
     iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setOpenMode(OpenMode.APPEND).setCodec(codec);
-    //((LogMergePolicy) iwconf.getMergePolicy()).setUseCompoundFile(false);
+    //((LogMergePolicy) iwconf.getMergePolicy()).setNoCFSRatio(0.0);
     //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
     iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 88bbc3a..defc848 100755
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -19,6 +19,7 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -596,7 +597,7 @@
 
     Directory dir = newDirectory();
     LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(100);
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random()))
@@ -625,7 +626,7 @@
     Directory dir2 = newDirectory();
     lmp = new LogByteSizeMergePolicy();
     lmp.setMinMergeMB(0.0001);
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(4);
     writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random()))
@@ -888,7 +889,7 @@
 
       if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
         report = !didClose;
-      } else if (t instanceof FileNotFoundException)  {
+      } else if (t instanceof FileNotFoundException || t instanceof NoSuchFileException)  {
         report = !didClose;
       } else if (t instanceof IOException)  {
         Throwable t2 = t.getCause();
@@ -1094,7 +1095,7 @@
     
     Directory dir = new MockDirectoryWrapper(random(), new RAMDirectory());
     IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(true));
-    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
+    MergePolicy lmp = conf.getMergePolicy();
     // Force creation of CFS:
     lmp.setNoCFSRatio(1.0);
     lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
index dad7361..644477e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -25,18 +25,7 @@
 import org.apache.lucene.util.*;
 
 public class TestAtomicUpdate extends LuceneTestCase {
-  private static final class MockIndexWriter extends IndexWriter {
-    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    @Override
-    boolean testPoint(String name) {
-      if (LuceneTestCase.random().nextInt(4) == 2)
-        Thread.yield();
-      return true;
-    }
-  }
+  
 
   private static abstract class TimedThread extends Thread {
     volatile boolean failed;
@@ -124,7 +113,7 @@
         TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setMaxBufferedDocs(7);
     ((TieredMergePolicy) conf.getMergePolicy()).setMaxMergeAtOnce(3);
-    IndexWriter writer = new MockIndexWriter(directory, conf);
+    IndexWriter writer = RandomIndexWriter.mockIndexWriter(directory, conf, random());
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 82e1947..ca85172 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -573,8 +573,7 @@
     _TestUtil.rmDir(indexDir);
     Directory dir = newFSDirectory(indexDir);
     LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
-    mp.setUseCompoundFile(doCFS);
-    mp.setNoCFSRatio(1.0);
+    mp.setNoCFSRatio(doCFS ? 1.0 : 0.0);
     mp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
     // TODO: remove randomness
     IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
@@ -593,8 +592,7 @@
     if (!fullyMerged) {
       // open fresh writer so we get no prx file in the added segment
       mp = new LogByteSizeMergePolicy();
-      mp.setUseCompoundFile(doCFS);
-      mp.setNoCFSRatio(1.0);
+      mp.setNoCFSRatio(doCFS ? 1.0 : 0.0);
       // TODO: remove randomness
       conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
         .setMaxBufferedDocs(10).setMergePolicy(mp);
@@ -626,7 +624,7 @@
     try {
       Directory dir = newFSDirectory(outputDir);
 
-      LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
+      MergePolicy mergePolicy = newLogMergePolicy(true, 10);
       
       // This test expects all of its segments to be in CFS:
       mergePolicy.setNoCFSRatio(1.0); 
@@ -637,7 +635,7 @@
           newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
               setMaxBufferedDocs(-1).
               setRAMBufferSizeMB(16.0).
-              setMergePolicy(mergePolicy)
+              setMergePolicy(mergePolicy).setUseCompoundFile(true)
       );
       for(int i=0;i<35;i++) {
         addDoc(writer, i);
@@ -649,7 +647,7 @@
       writer = new IndexWriter(
           dir,
           newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
-            .setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
+            .setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES).setUseCompoundFile(true)
       );
       Term searchTerm = new Term("id", "7");
       writer.deleteDocuments(searchTerm);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
index 0c76427..b24c53a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -58,6 +58,9 @@
         boolean isClose = false;
         StackTraceElement[] trace = new Exception().getStackTrace();
         for (int i = 0; i < trace.length; i++) {
+          if (isDoFlush && isClose) {
+            break;
+          }
           if ("flush".equals(trace[i].getMethodName())) {
             isDoFlush = true;
           }
@@ -302,11 +305,7 @@
         }
       }
       };
-    if (maxMergeThreads > cms.getMaxMergeCount()) {
-      cms.setMaxMergeCount(maxMergeCount);
-    }
-    cms.setMaxThreadCount(maxMergeThreads);
-    cms.setMaxMergeCount(maxMergeCount);
+    cms.setMaxMergesAndThreads(maxMergeCount, maxMergeThreads);
     iwc.setMergeScheduler(cms);
     iwc.setMaxBufferedDocs(2);
 
@@ -332,8 +331,7 @@
     long totMergedBytes;
 
     public TrackingCMS() {
-      setMaxMergeCount(5);
-      setMaxThreadCount(5);
+      setMaxMergesAndThreads(5, 5);
     }
 
     @Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
index eec412c..2a0528e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
@@ -112,12 +112,7 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+    public SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
index 7644583..f4d3f36 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -225,9 +225,7 @@
         new MockAnalyzer(random()))
         .setIndexDeletionPolicy(new ExpirationTimeDeletionPolicy(dir, SECONDS));
     MergePolicy mp = conf.getMergePolicy();
-    if (mp instanceof LogMergePolicy) {
-      ((LogMergePolicy) mp).setUseCompoundFile(true);
-    }
+    mp.setNoCFSRatio(1.0);
     IndexWriter writer = new IndexWriter(dir, conf);
     ExpirationTimeDeletionPolicy policy = (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
     Map<String,String> commitData = new HashMap<String,String>();
@@ -246,9 +244,7 @@
           new MockAnalyzer(random())).setOpenMode(
           OpenMode.APPEND).setIndexDeletionPolicy(policy);
       mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(true);
-      }
+      mp.setNoCFSRatio(1.0);
       writer = new IndexWriter(dir, conf);
       policy = (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
       for(int j=0;j<17;j++) {
@@ -326,9 +322,7 @@
           .setMaxBufferedDocs(10)
           .setMergeScheduler(new SerialMergeScheduler());
       MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-      }
+      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
       IndexWriter writer = new IndexWriter(dir, conf);
       KeepAllDeletionPolicy policy = (KeepAllDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
       for(int i=0;i<107;i++) {
@@ -347,9 +341,7 @@
                                     new MockAnalyzer(random())).setOpenMode(
                                                                     OpenMode.APPEND).setIndexDeletionPolicy(policy);
         mp = conf.getMergePolicy();
-        if (mp instanceof LogMergePolicy) {
-          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-        }
+        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
         if (VERBOSE) {
           System.out.println("TEST: open writer for forceMerge");
         }
@@ -526,9 +518,7 @@
           .setIndexDeletionPolicy(new KeepNoneOnInitDeletionPolicy())
           .setMaxBufferedDocs(10);
       MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-      }
+      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
       IndexWriter writer = new IndexWriter(dir, conf);
       KeepNoneOnInitDeletionPolicy policy = (KeepNoneOnInitDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
       for(int i=0;i<107;i++) {
@@ -539,9 +529,7 @@
       conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
           .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
       mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(true);
-      }
+      mp.setNoCFSRatio(1.0);
       writer = new IndexWriter(dir, conf);
       policy = (KeepNoneOnInitDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
       writer.forceMerge(1);
@@ -581,9 +569,7 @@
             .setIndexDeletionPolicy(policy)
             .setMaxBufferedDocs(10);
         MergePolicy mp = conf.getMergePolicy();
-        if (mp instanceof LogMergePolicy) {
-          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-        }
+        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
         IndexWriter writer = new IndexWriter(dir, conf);
         policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
         for(int i=0;i<17;i++) {
@@ -642,9 +628,7 @@
           .setIndexDeletionPolicy(new KeepLastNDeletionPolicy(N))
           .setMaxBufferedDocs(10);
       MergePolicy mp = conf.getMergePolicy();
-      if (mp instanceof LogMergePolicy) {
-        ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-      }
+      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
       IndexWriter writer = new IndexWriter(dir, conf);
       KeepLastNDeletionPolicy policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
       writer.close();
@@ -658,9 +642,7 @@
             .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)
             .setMaxBufferedDocs(10);
         mp = conf.getMergePolicy();
-        if (mp instanceof LogMergePolicy) {
-          ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
-        }
+        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
         writer = new IndexWriter(dir, conf);
         policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
         for(int j=0;j<17;j++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
index 9f8890b..102b7c9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -32,6 +33,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
@@ -428,8 +430,8 @@
     }
     try {
       DirectoryReader.open(fileDirName);
-      fail("opening DirectoryReader on empty directory failed to produce FileNotFoundException");
-    } catch (FileNotFoundException e) {
+      fail("opening DirectoryReader on empty directory failed to produce FileNotFoundException/NoSuchFileException");
+    } catch (FileNotFoundException | NoSuchFileException e) {
       // GOOD
     }
     rmDir(fileDirName);
@@ -470,8 +472,8 @@
     Directory dir = newFSDirectory(dirFile);
     try {
       DirectoryReader.open(dir);
-      fail("expected FileNotFoundException");
-    } catch (FileNotFoundException e) {
+      fail("expected FileNotFoundException/NoSuchFileException");
+    } catch (FileNotFoundException | NoSuchFileException e) {
       // expected
     }
 
@@ -480,8 +482,8 @@
     // Make sure we still get a CorruptIndexException (not NPE):
     try {
       DirectoryReader.open(dir);
-      fail("expected FileNotFoundException");
-    } catch (FileNotFoundException e) {
+      fail("expected FileNotFoundException/NoSuchFileException");
+    } catch (FileNotFoundException | NoSuchFileException e) {
       // expected
     }
     
@@ -763,7 +765,7 @@
             setMergePolicy(newLogMergePolicy(10))
     );
     Document doc = new Document();
-    doc.add(newStringField("number", "17", Field.Store.NO));
+    doc.add(new IntField("number", 17, Field.Store.NO));
     writer.addDocument(doc);
     writer.commit();
   
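The multi-catch changes above account for `Directory` implementations built on NIO.2, which report a missing segments file as `java.nio.file.NoSuchFileException` rather than `java.io.FileNotFoundException`. A small sketch of the same idiom, assuming only the stock 4.x `DirectoryReader.open`:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.NoSuchFileException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    final class OpenIfPresent {
      // Returns a reader, or null when the directory contains no index yet.
      static DirectoryReader openIfPresent(Directory dir) throws IOException {
        try {
          return DirectoryReader.open(dir);
        } catch (FileNotFoundException | NoSuchFileException e) {
          // Both mean "no segments file"; which one surfaces depends on the Directory.
          return null;
        }
      }
    }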
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java
index ff34c22..17ce72b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java
@@ -87,23 +87,17 @@
               BytesRef scratch2 = new BytesRef();
               for(int iter=0;iter<iters;iter++) {
                 int docID = threadRandom.nextInt(numDocs);
-                switch(threadRandom.nextInt(6)) {
+                switch(threadRandom.nextInt(4)) {
                 case 0:
-                  assertEquals((byte) numbers.get(docID).longValue(), FieldCache.DEFAULT.getBytes(ar, "number", false).get(docID));
-                  break;
-                case 1:
-                  assertEquals((short) numbers.get(docID).longValue(), FieldCache.DEFAULT.getShorts(ar, "number", false).get(docID));
-                  break;
-                case 2:
                   assertEquals((int) numbers.get(docID).longValue(), FieldCache.DEFAULT.getInts(ar, "number", false).get(docID));
                   break;
-                case 3:
+                case 1:
                   assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getLongs(ar, "number", false).get(docID));
                   break;
-                case 4:
+                case 2:
                   assertEquals(Float.intBitsToFloat((int) numbers.get(docID).longValue()), FieldCache.DEFAULT.getFloats(ar, "number", false).get(docID), 0.0f);
                   break;
-                case 5:
+                case 3:
                   assertEquals(Double.longBitsToDouble(numbers.get(docID).longValue()), FieldCache.DEFAULT.getDoubles(ar, "number", false).get(docID), 0.0);
                   break;
                 }
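The switch shrinks from six cases to four because the byte and short entry points were removed from `FieldCache`; only ints, longs, floats and doubles survive. A sketch of the remaining accessors as used above (4.x `FieldCache` API):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.search.FieldCache;

    final class NumericCacheReads {
      static void dump(AtomicReader ar, int docID) throws IOException {
        // The four numeric views that remain after the byte/short removal:
        int i    = FieldCache.DEFAULT.getInts(ar, "number", false).get(docID);
        long l   = FieldCache.DEFAULT.getLongs(ar, "number", false).get(docID);
        float f  = FieldCache.DEFAULT.getFloats(ar, "number", false).get(docID);
        double d = FieldCache.DEFAULT.getDoubles(ar, "number", false).get(docID);
        System.out.println(i + " " + l + " " + f + " " + d);
      }
    }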
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterStallControl.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterStallControl.java
index 4069223..319d7bc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterStallControl.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterStallControl.java
@@ -339,6 +339,7 @@
       for (Thread thread : threads) {
         if (thread.getState() != state) {
           done = false;
+          break;
         }
       }
       if (done) {
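The added `break` simply stops scanning once one thread is found outside the target state; the result cannot change after that. The same pattern, extracted:

    final class ThreadStates {
      // True only if every thread is in the expected state; bails out early.
      static boolean allInState(Thread[] threads, Thread.State state) {
        for (Thread t : threads) {
          if (t.getState() != state) {
            return false; // equivalent to done = false; break;
          }
        }
        return true;
      }
    }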
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
index c2ad1e9..052ad6f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java
@@ -61,7 +61,7 @@
     }
     dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy());
-    ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
+    conf.getMergePolicy().setNoCFSRatio(0.0);
     IndexWriter writer = new IndexWriter(dir, conf);
     writer.addDocument(testDoc);
     writer.close();
@@ -170,12 +170,11 @@
     @Override
     public void readInternal(byte[] b, int offset, int length) throws IOException {
       simOutage();
+      delegate.seek(getFilePointer());
       delegate.readBytes(b, offset, length);
     }
     @Override
     public void seekInternal(long pos) throws IOException {
-      //simOutage();
-      delegate.seek(pos);
     }
     @Override
     public long length() {
@@ -187,7 +186,14 @@
     }
     @Override
     public FaultyIndexInput clone() {
-      return new FaultyIndexInput(delegate.clone());
+      FaultyIndexInput i = new FaultyIndexInput(delegate.clone());
+      // seek the clone to our current position
+      try {
+        i.seek(getFilePointer());
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+      return i;
     }
   }
 
@@ -197,8 +203,9 @@
 
     try {
       Directory dir = new FaultyFSDirectory(indexDir);
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( 
-          TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
+      IndexWriterConfig iwc = newIndexWriterConfig( 
+          TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE);
+      IndexWriter writer = new IndexWriter(dir, iwc);
       for(int i=0;i<2;i++)
         writer.addDocument(testDoc);
       writer.forceMerge(1);
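The `clone()` fix above follows the general contract for `IndexInput` wrappers: a clone must start at the cloned input's current file pointer, and since the wrapper buffers its own position, the fresh delegate clone has to be seeked explicitly (and `clone()` cannot throw `IOException`, hence the wrap). A sketch of the idiom:

    import java.io.IOException;

    import org.apache.lucene.store.IndexInput;

    final class CloneHelper {
      // Positions a delegate clone at the wrapper's current offset before use.
      static IndexInput positionedClone(IndexInput wrapper, IndexInput delegateClone) {
        try {
          delegateClone.seek(wrapper.getFilePointer());
        } catch (IOException e) {
          throw new RuntimeException(e); // clone() is not allowed to throw IOException
        }
        return delegateClone;
      }
    }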
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
index 79a1d8b..a34d2f8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -44,7 +44,7 @@
       ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
     }
 
-    LogMergePolicy mergePolicy = newLogMergePolicy(true, 10);
+    MergePolicy mergePolicy = newLogMergePolicy(true, 10);
     
     // This test expects all of its segments to be in CFS
     mergePolicy.setNoCFSRatio(1.0);
@@ -54,14 +54,15 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
             setMaxBufferedDocs(10).
-            setMergePolicy(mergePolicy)
+            setMergePolicy(mergePolicy).setUseCompoundFile(true)
     );
 
     int i;
     for(i=0;i<35;i++) {
       addDoc(writer, i);
     }
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
+    writer.getConfig().getMergePolicy().setNoCFSRatio(0.0);
+    writer.getConfig().setUseCompoundFile(false);
     for(;i<45;i++) {
       addDoc(writer, i);
     }
@@ -71,7 +72,7 @@
     writer = new IndexWriter(
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
-            setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
+            setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES).setUseCompoundFile(true)
     );
     Term searchTerm = new Term("id", "7");
     writer.deleteDocuments(searchTerm);
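These hunks separate the two compound-file knobs: `IndexWriterConfig.setUseCompoundFile` now governs newly flushed segments, while `MergePolicy.setNoCFSRatio` governs merged segments, and the test flips them independently. A minimal configuration sketch, assuming those 4.x setters (the `Version` constant is illustrative):

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.util.Version;

    final class CfsKnobs {
      static IndexWriterConfig newConfig() {
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_44,
            new WhitespaceAnalyzer(Version.LUCENE_44));
        iwc.setUseCompoundFile(true);            // flushed segments: write CFS
        iwc.getMergePolicy().setNoCFSRatio(0.0); // merged segments: never CFS
        return iwc;
      }
    }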
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 67defdd..8caafd6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -704,7 +704,7 @@
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()));
       //LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
       //lmp.setMergeFactor(2);
-      //lmp.setUseCompoundFile(false);
+      //lmp.setNoCFSRatio(0.0);
       Document doc = new Document();
       String contents = "aa bb cc dd ee ff gg hh ii jj kk";
 
@@ -732,7 +732,7 @@
       if (0 == i % 4) {
         writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
         //LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy();
-        //lmp2.setUseCompoundFile(false);
+        //lmp2.setNoCFSRatio(0.0);
         writer.forceMerge(1);
         writer.close();
       }
@@ -1339,7 +1339,7 @@
     for(int iter=0;iter<2;iter++) {
       Directory dir = newMockDirectory(); // relies on windows semantics
 
-      LogMergePolicy mergePolicy = newLogMergePolicy(true);
+      MergePolicy mergePolicy = newLogMergePolicy(true);
       
       // This test expects all of its segments to be in CFS
       mergePolicy.setNoCFSRatio(1.0);
@@ -1348,7 +1348,7 @@
       IndexWriter w = new IndexWriter(
           dir,
           newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
-              setMergePolicy(mergePolicy)
+              setMergePolicy(mergePolicy).setUseCompoundFile(true)
       );
       Document doc = new Document();
       doc.add(newTextField("field", "go", Field.Store.NO));
@@ -1468,7 +1468,7 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random()))
-                                         .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()));
+                                         .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()).setUseCompoundFile(false));
     String[] files = dir.listAll();
 
     // Creating over empty dir should not create any files,
@@ -1550,7 +1550,7 @@
 
     Directory dir = newDirectory();
     IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setRAMBufferSizeMB(0.01).setMergePolicy(newLogMergePolicy()));
-    ((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
+    indexWriter.getConfig().getMergePolicy().setNoCFSRatio(0.0);
 
     String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
     BIG=BIG+BIG+BIG+BIG;
@@ -2209,4 +2209,27 @@
       dir.close();
     }
   }
+
+  public void testHasUncommittedChanges() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    assertTrue(writer.hasUncommittedChanges());  // true even before any docs: committing would first have to create the (empty) index
+    Document doc = new Document();
+    doc.add(newTextField("myfield", "a b c", Field.Store.NO));
+    writer.addDocument(doc);
+    assertTrue(writer.hasUncommittedChanges());
+    writer.commit();
+    assertFalse(writer.hasUncommittedChanges());
+    writer.addDocument(doc);
+    assertTrue(writer.hasUncommittedChanges());
+    writer.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    assertFalse(writer.hasUncommittedChanges());
+    writer.addDocument(doc);
+    assertTrue(writer.hasUncommittedChanges());
+
+    writer.close();
+    dir.close();
+  }
 }
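The new test pins down `IndexWriter.hasUncommittedChanges()`: it is true right after a writer creates an index (the creation itself is pending), false after `commit()`, and true again on the next modification. A typical use of that API is to skip needless commits:

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;

    final class CommitIfDirty {
      // Commit only when the writer actually has pending changes.
      static void maybeCommit(IndexWriter w) throws IOException {
        if (w.hasUncommittedChanges()) {
          w.commit();
        }
      }
    }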
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
index 2f65d10..9bc0802 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
@@ -20,11 +20,14 @@
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.FieldInfosFormat;
+import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
@@ -78,6 +81,7 @@
     assertEquals(IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB, conf.getRAMPerThreadHardLimitMB());
     assertEquals(Codec.getDefault(), conf.getCodec());
     assertEquals(InfoStream.getDefault(), conf.getInfoStream());
+    assertEquals(IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM, conf.getUseCompoundFile());
     // Sanity check - validate that all getters are covered.
     Set<String> getters = new HashSet<String>();
     getters.add("getAnalyzer");
@@ -104,6 +108,7 @@
     getters.add("getRAMPerThreadHardLimitMB");
     getters.add("getCodec");
     getters.add("getInfoStream");
+    getters.add("getUseCompoundFile");
     
     for (Method m : IndexWriterConfig.class.getDeclaredMethods()) {
       if (m.getDeclaringClass() == IndexWriterConfig.class && m.getName().startsWith("get")) {
@@ -188,6 +193,7 @@
     assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS);
     assertEquals(16.0, IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB, 0.0);
     assertEquals(false, IndexWriterConfig.DEFAULT_READER_POOLING);
+    assertEquals(true, IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM);
     assertEquals(DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR);
   }
 
@@ -370,26 +376,36 @@
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setMergePolicy(newLogMergePolicy(true));
-
     // Start false:
-    ((LogMergePolicy) iwc.getMergePolicy()).setUseCompoundFile(false); 
+    iwc.setUseCompoundFile(false); 
+    iwc.getMergePolicy().setNoCFSRatio(0.0d);
     IndexWriter w = new IndexWriter(dir, iwc);
-
     // Change to true:
-    LogMergePolicy lmp = ((LogMergePolicy) w.getConfig().getMergePolicy());
-    lmp.setNoCFSRatio(1.0);
-    lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
-    lmp.setUseCompoundFile(true);
+    w.getConfig().setUseCompoundFile(true);
 
     Document doc = new Document();
     doc.add(newStringField("field", "foo", Store.NO));
     w.addDocument(doc);
     w.commit();
-
-    for(String file : dir.listAll()) {
-      // frq file should be stuck into CFS
-      assertFalse(file.endsWith(".frq"));
-    }
+    assertTrue("Expected CFS after commit", w.newestSegment().info.getUseCompoundFile());
+    
+    doc.add(newStringField("field", "foo", Store.NO));
+    w.addDocument(doc);
+    w.commit();
+    w.forceMerge(1);
+    w.commit();
+   
+    // no compound files after merge
+    assertFalse("Expected Non-CFS after merge", w.newestSegment().info.getUseCompoundFile());
+    
+    MergePolicy lmp = w.getConfig().getMergePolicy();
+    lmp.setNoCFSRatio(1.0);
+    lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
+    
+    w.addDocument(doc);
+    w.forceMerge(1);
+    w.commit();
+    assertTrue("Expected CFS after merge", w.newestSegment().info.getUseCompoundFile());
     w.close();
     dir.close();
   }
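The `getters` bookkeeping above is enforced by reflection: every `getX` method declared directly on `IndexWriterConfig` must be listed, so a new getter without a matching default-value assertion fails the test. The pattern, extracted for any class:

    import java.lang.reflect.Method;
    import java.util.Set;

    final class GetterCoverage {
      // Returns the first getter declared on clazz that is missing from covered,
      // or null when every declared getter is accounted for.
      static Method findUncovered(Class<?> clazz, Set<String> covered) {
        for (Method m : clazz.getDeclaredMethods()) {
          if (m.getDeclaringClass() == clazz
              && m.getName().startsWith("get")
              && !covered.contains(m.getName())) {
            return m;
          }
        }
        return null;
      }
    }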
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index bcb154d..db94cb9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -768,8 +768,8 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
                                                                      TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy()));
 
-    LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy();
-    lmp.setUseCompoundFile(true);
+    MergePolicy lmp = modifier.getConfig().getMergePolicy();
+    lmp.setNoCFSRatio(1.0);
 
     dir.failOn(failure.reset());
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index c7cd0f0..5c23b80 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -21,6 +21,7 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
+import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -210,15 +211,10 @@
 
   ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();
 
-  private class MockIndexWriter extends IndexWriter {
+  private class TestPoint1 implements RandomIndexWriter.TestPoint {
     Random r = new Random(random().nextLong());
-
-    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
     @Override
-    boolean testPoint(String name) {
+    public void apply(String name) {
       if (doFail.get() != null && !name.equals("startDoFlush") && r.nextInt(40) == 17) {
         if (VERBOSE) {
           System.out.println(Thread.currentThread().getName() + ": NOW FAIL: " + name);
@@ -226,7 +222,6 @@
         }
         throw new RuntimeException(Thread.currentThread().getName() + ": intentionally failing at " + name);
       }
-      return true;
     }
   }
 
@@ -238,8 +233,9 @@
 
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
-    MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
-        .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler()));
+    
+    IndexWriter writer  = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
+        .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler()), new TestPoint1());
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
     if (VERBOSE) {
@@ -281,8 +277,8 @@
     Directory dir = newDirectory();
     MockAnalyzer analyzer = new MockAnalyzer(random());
     analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
-    MockIndexWriter writer  = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
-        .setRAMBufferSizeMB(0.2).setMergeScheduler(new ConcurrentMergeScheduler()));
+    IndexWriter writer  = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
+        .setRAMBufferSizeMB(0.2).setMergeScheduler(new ConcurrentMergeScheduler()), new TestPoint1());
     ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
     writer.commit();
@@ -324,19 +320,13 @@
   }
 
   // LUCENE-1198
-  private static final class MockIndexWriter2 extends IndexWriter {
-
-    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
+  private static final class TestPoint2 implements RandomIndexWriter.TestPoint {
     boolean doFail;
 
     @Override
-    boolean testPoint(String name) {
+    public void apply(String name) {
       if (doFail && name.equals("DocumentsWriterPerThread addDocument start"))
         throw new RuntimeException("intentionally failing");
-      return true;
     }
   }
 
@@ -367,11 +357,12 @@
 
   public void testExceptionDocumentsWriterInit() throws IOException {
     Directory dir = newDirectory();
-    MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    TestPoint2 testPoint = new TestPoint2();
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())), testPoint);
     Document doc = new Document();
     doc.add(newTextField("field", "a field", Field.Store.YES));
     w.addDocument(doc);
-    w.doFail = true;
+    testPoint.doFail = true;
     try {
       w.addDocument(doc);
       fail("did not hit exception");
@@ -385,7 +376,7 @@
   // LUCENE-1208
   public void testExceptionJustBeforeFlush() throws IOException {
     Directory dir = newDirectory();
-    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2));
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2), new TestPoint1());
     Document doc = new Document();
     doc.add(newTextField("field", "a field", Field.Store.YES));
     w.addDocument(doc);
@@ -412,22 +403,15 @@
     dir.close();
   }
 
-  private static final class MockIndexWriter3 extends IndexWriter {
-
-    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
+  private static final class TestPoint3 implements RandomIndexWriter.TestPoint {
     boolean doFail;
     boolean failed;
-
     @Override
-    boolean testPoint(String name) {
+    public void apply(String name) {
       if (doFail && name.equals("startMergeInit")) {
         failed = true;
         throw new RuntimeException("intentionally failing");
       }
-      return true;
     }
   }
 
@@ -441,8 +425,9 @@
     cms.setSuppressExceptions();
     conf.setMergeScheduler(cms);
     ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
-    MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
-    w.doFail = true;
+    TestPoint3 testPoint = new TestPoint3();
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, conf, testPoint);
+    testPoint.doFail = true;
     Document doc = new Document();
     doc.add(newTextField("field", "a field", Field.Store.YES));
     for(int i=0;i<10;i++)
@@ -453,7 +438,7 @@
       }
 
     ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
-    assertTrue(w.failed);
+    assertTrue(testPoint.failed);
     w.close();
     dir.close();
   }
@@ -555,10 +540,15 @@
         boolean sawAppend = false;
         boolean sawFlush = false;
         for (int i = 0; i < trace.length; i++) {
-          if (FreqProxTermsWriterPerField.class.getName().equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName()))
+          if (sawAppend && sawFlush) {
+            break;
+          }
+          if (FreqProxTermsWriterPerField.class.getName().equals(trace[i].getClassName()) && "flush".equals(trace[i].getMethodName())) {
             sawAppend = true;
-          if ("flush".equals(trace[i].getMethodName()))
+          }
+          if ("flush".equals(trace[i].getMethodName())) {
             sawFlush = true;
+          }
         }
 
         if (sawAppend && sawFlush && count++ >= 30) {
@@ -892,12 +882,18 @@
       boolean isDelete = false;
       boolean isInGlobalFieldMap = false;
       for (int i = 0; i < trace.length; i++) {
-        if (SegmentInfos.class.getName().equals(trace[i].getClassName()) && stage.equals(trace[i].getMethodName()))
+        if (isCommit && isDelete && isInGlobalFieldMap) {
+          break;
+        }
+        if (SegmentInfos.class.getName().equals(trace[i].getClassName()) && stage.equals(trace[i].getMethodName())) {
           isCommit = true;
-        if (MockDirectoryWrapper.class.getName().equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
+        }
+        if (MockDirectoryWrapper.class.getName().equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName())) {
           isDelete = true;
-        if (SegmentInfos.class.getName().equals(trace[i].getClassName()) && "writeGlobalFieldMap".equals(trace[i].getMethodName()))
+        }
+        if (SegmentInfos.class.getName().equals(trace[i].getClassName()) && "writeGlobalFieldMap".equals(trace[i].getMethodName())) {
           isInGlobalFieldMap = true;
+        }
           
       }
       if (isInGlobalFieldMap && dontFailDuringGlobalFieldMap) {
@@ -1014,29 +1010,26 @@
   }
 
   // LUCENE-1347
-  private static final class MockIndexWriter4 extends IndexWriter {
-
-    public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
+  private static final class TestPoint4 implements RandomIndexWriter.TestPoint {
 
     boolean doFail;
 
     @Override
-    boolean testPoint(String name) {
+    public void apply(String name) {
       if (doFail && name.equals("rollback before checkpoint"))
         throw new RuntimeException("intentionally failing");
-      return true;
     }
   }
 
   // LUCENE-1347
   public void testRollbackExceptionHang() throws Throwable {
     Directory dir = newDirectory();
-    MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    TestPoint4 testPoint = new TestPoint4();
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())), testPoint);
 
     addDoc(w);
-    w.doFail = true;
+    testPoint.doFail = true;
     try {
       w.rollback();
       fail("did not hit intentional RuntimeException");
@@ -1044,7 +1037,7 @@
       // expected
     }
 
-    w.doFail = false;
+    testPoint.doFail = false;
     w.rollback();
     dir.close();
   }
@@ -1150,9 +1143,9 @@
     writer  = new IndexWriter(
                               dir,
                               newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
-                              setMergePolicy(newLogMergePolicy(true))
+                              setMergePolicy(newLogMergePolicy(true)).setUseCompoundFile(true)
                               );
-    LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
+    MergePolicy lmp = writer.getConfig().getMergePolicy();
     // Force creation of CFS:
     lmp.setNoCFSRatio(1.0);
     lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
@@ -1342,6 +1335,7 @@
       for (int i = 0; i < trace.length; i++) {
         if (TermVectorsConsumer.class.getName().equals(trace[i].getClassName()) && stage.equals(trace[i].getMethodName())) {
           fail = true;
+          break;
         }
       }
       
@@ -1678,7 +1672,7 @@
       } catch (CorruptIndexException ex) {
         // Exceptions are fine - we are running out of file handlers here
         continue;
-      } catch (FileNotFoundException ex) {
+      } catch (FileNotFoundException | NoSuchFileException ex) {
         continue;
       }
       failure.clearDoFail();
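The recurring `MockIndexWriterN` subclasses in this file all existed to override a package-private `testPoint(String)`; they are replaced by small `RandomIndexWriter.TestPoint` callbacks handed to `RandomIndexWriter.mockIndexWriter(...)`, which throw to simulate a failure at a named internal stage. A sketch of such a hook (both names are test-framework APIs introduced by this change):

    import org.apache.lucene.index.RandomIndexWriter;

    // Throws exactly once, the first time IndexWriter reaches the named stage.
    final class FailOnce implements RandomIndexWriter.TestPoint {
      private final String stage;
      private boolean fired;

      FailOnce(String stage) {
        this.stage = stage;
      }

      @Override
      public void apply(String name) {
        if (!fired && stage.equals(name)) {
          fired = true;
          throw new RuntimeException("intentionally failing at " + name);
        }
      }
    }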
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
index aaa0fb1..e6467f1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
@@ -19,6 +19,7 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -37,10 +38,10 @@
     Directory dir = newFSDirectory(_TestUtil.getTempDir("testLockRelease"));
     try {
       new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-    } catch (FileNotFoundException e) {
+    } catch (FileNotFoundException | NoSuchFileException e) {
       try {
         new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
-      } catch (FileNotFoundException e1) {
+      } catch (FileNotFoundException | NoSuchFileException e1) {
       }
     } finally {
       dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
index d014b64..a05580e 100755
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
@@ -271,7 +271,7 @@
     assertSetters(new LogDocMergePolicy());
   }
 
-  private void assertSetters(LogMergePolicy lmp) {
+  private void assertSetters(MergePolicy lmp) {
     lmp.setMaxCFSSegmentSizeMB(2.0);
     assertEquals(2.0, lmp.getMaxCFSSegmentSizeMB(), EPSILON);
     
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index cc646b1..9e4e5a3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -358,6 +358,9 @@
         boolean sawClose = false;
         boolean sawMerge = false;
         for (int i = 0; i < trace.length; i++) {
+          if (sawAbortOrFlushDoc && sawMerge && sawClose) {
+            break;
+          }
           if ("abort".equals(trace[i].getMethodName()) ||
               "finishDocument".equals(trace[i].getMethodName())) {
             sawAbortOrFlushDoc = true;
@@ -370,8 +373,9 @@
           }
         }
         if (sawAbortOrFlushDoc && !sawClose && !sawMerge) {
-          if (onlyOnce)
+          if (onlyOnce) {
             doFail = false;
+          }
           //System.out.println(Thread.currentThread().getName() + ": now fail");
           //new Throwable().printStackTrace(System.out);
           throw new IOException("now failing on purpose");
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
index 371bb9f..97dc661 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -26,8 +26,9 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.search.similarities.TFIDFSimilarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 
@@ -97,21 +98,28 @@
   /**
    * Simple similarity that encodes maxTermFrequency directly as a byte
    */
-  class TestSimilarity extends DefaultSimilarity {
-
-    @Override
-    public byte encodeNormValue(float f) {
-      return (byte) f;
-    }
-    
-    @Override
-    public float decodeNormValue(byte b) {
-      return (float) b;
-    }
+  class TestSimilarity extends TFIDFSimilarity {
 
     @Override
     public float lengthNorm(FieldInvertState state) {
       return state.getMaxTermFrequency();
     }
+
+    @Override
+    public long encodeNormValue(float f) {
+      return (byte) f;
+    }
+
+    @Override
+    public float decodeNormValue(long norm) {
+      return norm;
+    }
+
+    @Override public float coord(int overlap, int maxOverlap) { return 0; }
+    @Override public float queryNorm(float sumOfSquaredWeights) { return 0; }
+    @Override public float tf(float freq) { return 0; }
+    @Override public float idf(long docFreq, long numDocs) { return 0; }
+    @Override public float sloppyFreq(int distance) { return 0; }
+    @Override public float scorePayload(int doc, int start, int end, BytesRef payload) { return 0; }
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNoMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestNoMergePolicy.java
index 187409e..9293fc7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNoMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNoMergePolicy.java
@@ -65,8 +65,8 @@
       if (m.getName().equals("clone")) {
         continue;
       }
-      if (m.getDeclaringClass() != Object.class) {
-        assertTrue(m + " is not overridden !", m.getDeclaringClass() == NoMergePolicy.class);
+      if (m.getDeclaringClass() != Object.class && !Modifier.isFinal(m.getModifiers())) {
+        assertTrue(m + " is not overridden!", m.getDeclaringClass() == NoMergePolicy.class);
       }
     }
   }
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
index bf7f13c..bb91737 100755
--- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
@@ -29,7 +29,9 @@
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.TFIDFSimilarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -45,21 +47,29 @@
 public class TestNorms extends LuceneTestCase {
   final String byteTestField = "normsTestByte";
 
-  class CustomNormEncodingSimilarity extends DefaultSimilarity {
+  class CustomNormEncodingSimilarity extends TFIDFSimilarity {
+
     @Override
-    public byte encodeNormValue(float f) {
-      return (byte) f;
+    public long encodeNormValue(float f) {
+      return (long) f;
     }
     
     @Override
-    public float decodeNormValue(byte b) {
-      return (float) b;
+    public float decodeNormValue(long norm) {
+      return norm;
     }
 
     @Override
     public float lengthNorm(FieldInvertState state) {
       return state.getLength();
     }
+
+    @Override public float coord(int overlap, int maxOverlap) { return 0; }
+    @Override public float queryNorm(float sumOfSquaredWeights) { return 0; }
+    @Override public float tf(float freq) { return 0; }
+    @Override public float idf(long docFreq, long numDocs) { return 0; }
+    @Override public float sloppyFreq(int distance) { return 0; }
+    @Override public float scorePayload(int doc, int start, int end, BytesRef payload) { return 0; }
   }
   
   // LUCENE-1260
@@ -179,12 +189,7 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+    public SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
       throw new UnsupportedOperationException();
     }
   } 
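`exactSimScorer` and `sloppySimScorer` collapse into a single `simScorer` whose `score(int doc, float freq)` serves both paths: exact term frequencies arrive as whole numbers, sloppy phrase frequencies as fractions. A minimal scorer under the unified API (assuming the 4.x `Similarity.SimScorer` abstract methods):

    import org.apache.lucene.search.similarities.Similarity.SimScorer;
    import org.apache.lucene.util.BytesRef;

    // Scores a document by its raw frequency, exact or sloppy.
    final class FreqScorer extends SimScorer {
      @Override
      public float score(int doc, float freq) {
        return freq; // integer tf and fractional phrase freq both land here
      }

      @Override
      public float computeSlopFactor(int distance) {
        return 1.0f / (distance + 1);
      }

      @Override
      public float computePayloadFactor(int doc, int start, int end, BytesRef payload) {
        return 1.0f;
      }
    }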
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
index c5459a6..c400fc5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
@@ -193,7 +193,7 @@
             TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy()));
     LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
     lmp.setMergeFactor(2);
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     Document d = new Document();
 
     FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
index 3c47770..aeee6e6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
@@ -194,7 +194,7 @@
                                                                    TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy()));
     LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
     lmp.setMergeFactor(2);
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     Document d = new Document();
 
     FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
index 16eebb0..47abea2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -18,26 +18,35 @@
  */
 
 import java.io.IOException;
-import java.util.concurrent.ExecutionException;
 
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.search.*;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.TFIDFSimilarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
 
 
 public class TestOmitTf extends LuceneTestCase {
   
   public static class SimpleSimilarity extends TFIDFSimilarity {
+    @Override public float decodeNormValue(long norm) { return norm; }
+    @Override public long encodeNormValue(float f) { return (long) f; }
     @Override
     public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
     @Override
@@ -215,7 +224,7 @@
                                                                    TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy()));
     LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
     lmp.setMergeFactor(2);
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     Document d = new Document();
         
     Field f1 = newField("f1", "This field has term freqs", omitType);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
index 6aa44dd..dfeb393 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestRollingUpdates.java
@@ -59,7 +59,7 @@
     // TODO: sometimes update ids not in order...
     for(int docIter=0;docIter<numUpdates;docIter++) {
       final Document doc = docs.nextDoc();
-      final String myID = ""+id;
+      final String myID = Integer.toString(id);
       if (id == SIZE-1) {
         id = 0;
       } else {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 906f80cf..bc78249 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -47,21 +47,16 @@
   static int maxBufferedDocs=3;
   static int seed=0;
 
-  public class MockIndexWriter extends IndexWriter {
-
-    public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
+  public final class YieldTestPoint implements RandomIndexWriter.TestPoint {
 
     @Override
-    boolean testPoint(String name) {
+    public void apply(String name) {
       //      if (name.equals("startCommit")) {
       if (random().nextInt(4) == 2)
         Thread.yield();
-      return true;
     }
   }
   public void testRandomIWReader() throws Throwable {
     Directory dir = newDirectory();
     
@@ -151,12 +146,12 @@
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
-    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
-                                                                                                  0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()));
+            0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()), new YieldTestPoint());
     w.commit();
     LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(mergeFactor);
     /***
         w.setMaxMergeDocs(Integer.MAX_VALUE);
@@ -202,12 +197,12 @@
   public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates,
                                           boolean doReaderPooling) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
-    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)
              .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(maxThreadStates))
-             .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()));
+             .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()), new YieldTestPoint());
     LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
-    lmp.setUseCompoundFile(false);
+    lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(mergeFactor);
 
     threads = new IndexingThread[nThreads];
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index a57ace2..63018a1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -93,7 +93,7 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MyAnalyzer()).
             setMaxBufferedDocs(-1).
-            setMergePolicy(newLogMergePolicy(false, 10))
+            setMergePolicy(newLogMergePolicy(false, 10)).setUseCompoundFile(false)
     );
 
     Document doc = new Document();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
index 64fae63..c0d9b60 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
@@ -40,7 +40,7 @@
   private static IndexWriter getWriter (Directory directory)
     throws IOException
   {
-    LogMergePolicy policy = new LogByteSizeMergePolicy();
+    MergePolicy policy = new LogByteSizeMergePolicy();
     IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
                                                    new MockAnalyzer(random()));
     conf.setMergePolicy(policy);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
index 5e1da90..3afa9a2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
@@ -110,12 +110,7 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+    public SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
diff --git a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
index 72da35b..861e541 100644
--- a/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -22,7 +22,12 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -115,9 +120,17 @@
     
     Document doc = new Document();
     Field idField = newStringField(random, "id", "", Field.Store.YES);
+    Field intIdField = new IntField("id_int", 0, Store.YES);
+    Field floatIdField = new FloatField("id_float", 0, Store.YES);
+    Field longIdField = new LongField("id_long", 0, Store.YES);
+    Field doubleIdField = new DoubleField("id_double", 0, Store.YES);
     Field randField = newStringField(random, "rand", "", Field.Store.YES);
     Field bodyField = newStringField(random, "body", "", Field.Store.NO);
     doc.add(idField);
+    doc.add(intIdField);
+    doc.add(floatIdField);
+    doc.add(longIdField);
+    doc.add(doubleIdField);
     doc.add(randField);
     doc.add(bodyField);
 
@@ -133,6 +146,10 @@
 
       for (int d = minId; d <= maxId; d++) {
         idField.setStringValue(pad(d));
+        intIdField.setIntValue(d);
+        floatIdField.setFloatValue(d);
+        longIdField.setLongValue(d);
+        doubleIdField.setDoubleValue(d);
         int r = index.allowNegativeRandomInts ? random.nextInt() : random
           .nextInt(Integer.MAX_VALUE);
         if (index.maxR < r) {
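The range-filter fixture now indexes real numeric fields next to the padded string id, reusing one `Field` instance per type and resetting its value each iteration rather than allocating new fields per document. The reuse pattern on its own (4.x document API):

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field.Store;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.document.LongField;
    import org.apache.lucene.index.IndexWriter;

    final class ReusedNumericFields {
      static void addDocs(IndexWriter w, int n) throws IOException {
        Document doc = new Document();
        IntField intId = new IntField("id_int", 0, Store.YES);
        LongField longId = new LongField("id_long", 0L, Store.YES);
        doc.add(intId);
        doc.add(longId);
        for (int d = 0; d < n; d++) {
          intId.setIntValue(d);   // mutate in place; no per-doc Field allocation
          longId.setLongValue(d);
          w.addDocument(doc);     // same Document, updated values
        }
      }
    }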
diff --git a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
index 7171cb1..b4bf0a4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -270,12 +270,7 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) {
+    public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java b/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
index a326e78..1c9497f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConjunctions.java
@@ -109,18 +109,8 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      return new ExactSimScorer() {
-        @Override
-        public float score(int doc, int freq) {
-          return freq;
-        }
-      };
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      return new SloppySimScorer() {
+    public SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+      return new SimScorer() {
         @Override
         public float score(int doc, float freq) {
           return freq;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
index 5df6a43..e3a5369 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java
@@ -156,34 +156,11 @@
     }
 
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-      final ExactSimScorer sub = sim.exactSimScorer(stats, context);
-      final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), boostField, false);
-
-      return new ExactSimScorer() {
-        @Override
-        public float score(int doc, int freq) {
-          return values.get(doc) * sub.score(doc, freq);
-        }
-
-        @Override
-        public Explanation explain(int doc, Explanation freq) {
-          Explanation boostExplanation = new Explanation(values.get(doc), "indexDocValue(" + boostField + ")");
-          Explanation simExplanation = sub.explain(doc, freq);
-          Explanation expl = new Explanation(boostExplanation.getValue() * simExplanation.getValue(), "product of:");
-          expl.addDetail(boostExplanation);
-          expl.addDetail(simExplanation);
-          return expl;
-        }
-      };
-    }
-
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
-      final SloppySimScorer sub = sim.sloppySimScorer(stats, context);
+    public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
+      final SimScorer sub = sim.simScorer(stats, context);
       final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), boostField, false);
       
-      return new SloppySimScorer() {
+      return new SimScorer() {
         @Override
         public float score(int doc, float freq) {
           return values.get(doc) * sub.score(doc, freq);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java
index c2ed9a0..bbfa622 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java
@@ -23,33 +23,45 @@
 import java.util.Arrays;
 import java.util.LinkedHashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.FloatField;
 import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.FieldCache.Bytes;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocTermOrds;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.FieldCache.Doubles;
 import org.apache.lucene.search.FieldCache.Floats;
 import org.apache.lucene.search.FieldCache.Ints;
 import org.apache.lucene.search.FieldCache.Longs;
-import org.apache.lucene.search.FieldCache.Shorts;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util._TestUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -70,8 +82,6 @@
     RandomIndexWriter writer= new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
-    byte theByte = Byte.MAX_VALUE;
-    short theShort = Short.MAX_VALUE;
     int theInt = Integer.MAX_VALUE;
     float theFloat = Float.MAX_VALUE;
     unicodeStrings = new String[NUM_DOCS];
@@ -81,14 +91,12 @@
     }
     for (int i = 0; i < NUM_DOCS; i++){
       Document doc = new Document();
-      doc.add(newStringField("theLong", String.valueOf(theLong--), Field.Store.NO));
-      doc.add(newStringField("theDouble", String.valueOf(theDouble--), Field.Store.NO));
-      doc.add(newStringField("theByte", String.valueOf(theByte--), Field.Store.NO));
-      doc.add(newStringField("theShort", String.valueOf(theShort--), Field.Store.NO));
-      doc.add(newStringField("theInt", String.valueOf(theInt--), Field.Store.NO));
-      doc.add(newStringField("theFloat", String.valueOf(theFloat--), Field.Store.NO));
+      doc.add(new LongField("theLong", theLong--, Field.Store.NO));
+      doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
+      doc.add(new IntField("theInt", theInt--, Field.Store.NO));
+      doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
       if (i%2 == 0) {
-        doc.add(newStringField("sparse", String.valueOf(i), Field.Store.NO));
+        doc.add(new IntField("sparse", i, Field.Store.NO));
       }
 
       if (i%2 == 0) {
@@ -133,7 +141,16 @@
       ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
       cache.setInfoStream(new PrintStream(bos, false, "UTF-8"));
       cache.getDoubles(reader, "theDouble", false);
-      cache.getFloats(reader, "theDouble", false);
+      cache.getFloats(reader, "theDouble", new FieldCache.FloatParser() {
+        @Override
+        public TermsEnum termsEnum(Terms terms) throws IOException {
+          return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
+        }
+        @Override
+        public float parseFloat(BytesRef term) {
+          return NumericUtils.sortableIntToFloat((int) NumericUtils.prefixCodedToLong(term));
+        }
+      }, false);
       assertTrue(bos.toString("UTF-8").indexOf("WARNING") != -1);
     } finally {
       FieldCache.DEFAULT.purgeAllCaches();
@@ -144,42 +161,28 @@
     FieldCache cache = FieldCache.DEFAULT;
     FieldCache.Doubles doubles = cache.getDoubles(reader, "theDouble", random().nextBoolean());
     assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, random().nextBoolean()));
+    assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertTrue(doubles.get(i) + " does not equal: " + (Double.MAX_VALUE - i), doubles.get(i) == (Double.MAX_VALUE - i));
     }
     
     FieldCache.Longs longs = cache.getLongs(reader, "theLong", random().nextBoolean());
     assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER, random().nextBoolean()));
+    assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertTrue(longs.get(i) + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs.get(i) == (Long.MAX_VALUE - i));
     }
-    
-    FieldCache.Bytes bytes = cache.getBytes(reader, "theByte", random().nextBoolean());
-    assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, random().nextBoolean()));
-    for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(bytes.get(i) + " does not equal: " + (Byte.MAX_VALUE - i), bytes.get(i) == (byte) (Byte.MAX_VALUE - i));
-    }
-    
-    FieldCache.Shorts shorts = cache.getShorts(reader, "theShort", random().nextBoolean());
-    assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER, random().nextBoolean()));
-    for (int i = 0; i < NUM_DOCS; i++) {
-      assertTrue(shorts.get(i) + " does not equal: " + (Short.MAX_VALUE - i), shorts.get(i) == (short) (Short.MAX_VALUE - i));
-    }
-    
+
     FieldCache.Ints ints = cache.getInts(reader, "theInt", random().nextBoolean());
     assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER, random().nextBoolean()));
+    assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertTrue(ints.get(i) + " does not equal: " + (Integer.MAX_VALUE - i), ints.get(i) == (Integer.MAX_VALUE - i));
     }
     
     FieldCache.Floats floats = cache.getFloats(reader, "theFloat", random().nextBoolean());
     assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random().nextBoolean()));
-    assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER, random().nextBoolean()));
+    assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
     for (int i = 0; i < NUM_DOCS; i++) {
       assertTrue(floats.get(i) + " does not equal: " + (Float.MAX_VALUE - i), floats.get(i) == (Float.MAX_VALUE - i));
     }
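
The parser swap in this hunk tracks the indexing change above: the DEFAULT_*_PARSER variants parse each term's string form (Integer.parseInt and friends), which matched the old newStringField(String.valueOf(...)) documents, while the NUMERIC_UTILS_*_PARSER variants decode the trie terms that IntField, LongField, FloatField and DoubleField now write. A sketch of the difference, using only NumericUtils calls that appear in this commit (hedged, not the parsers' actual source):

    // String-indexed numerics: the term text is the decimal representation.
    int viaDefault = Integer.parseInt("2147483647");
    // Trie-indexed numerics: the term bytes are prefix-coded, so decode, don't parse.
    BytesRef term = new BytesRef();
    NumericUtils.intToPrefixCoded(Integer.MAX_VALUE, 0, term);
    int viaNumericUtils = NumericUtils.prefixCodedToInt(term);
    assert viaDefault == viaNumericUtils;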
@@ -587,12 +590,6 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    Bytes bytes = cache.getBytes(ar, "bogusbytes", true);
-    assertEquals(0, bytes.get(0));
-
-    Shorts shorts = cache.getShorts(ar, "bogusshorts", true);
-    assertEquals(0, shorts.get(0));
-    
     Ints ints = cache.getInts(ar, "bogusints", true);
     assertEquals(0, ints.get(0));
     
@@ -652,12 +649,6 @@
     cache.purgeAllCaches();
     assertEquals(0, cache.getCacheEntries().length);
     
-    Bytes bytes = cache.getBytes(ar, "bogusbytes", true);
-    assertEquals(0, bytes.get(0));
-
-    Shorts shorts = cache.getShorts(ar, "bogusshorts", true);
-    assertEquals(0, shorts.get(0));
-    
     Ints ints = cache.getInts(ar, "bogusints", true);
     assertEquals(0, ints.get(0));
     
@@ -692,4 +683,97 @@
     ir.close();
     dir.close();
   }
+
+  // Make sure that the use of GrowableWriter doesn't prevent using the full long range
+  public void testLongFieldCache() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    cfg.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
+    Document doc = new Document();
+    LongField field = new LongField("f", 0L, Store.YES);
+    doc.add(field);
+    final long[] values = new long[_TestUtil.nextInt(random(), 1, 10)];
+    for (int i = 0; i < values.length; ++i) {
+      final long v;
+      switch (random().nextInt(10)) {
+        case 0:
+          v = Long.MIN_VALUE;
+          break;
+        case 1:
+          v = 0;
+          break;
+        case 2:
+          v = Long.MAX_VALUE;
+          break;
+        default:
+          v = _TestUtil.nextLong(random(), -10, 10);
+          break;
+      }
+      values[i] = v;
+      if (v == 0 && random().nextBoolean()) {
+        // missing
+        iw.addDocument(new Document());
+      } else {
+        field.setLongValue(v);
+        iw.addDocument(doc);
+      }
+    }
+    iw.forceMerge(1);
+    final DirectoryReader reader = iw.getReader();
+    final FieldCache.Longs longs = FieldCache.DEFAULT.getLongs(getOnlySegmentReader(reader), "f", false);
+    for (int i = 0; i < values.length; ++i) {
+      assertEquals(values[i], longs.get(i));
+    }
+    reader.close();
+    iw.close();
+    dir.close();
+  }
+
+  // Make sure that the use of GrowableWriter doesn't prevent using the full int range
+  public void testIntFieldCache() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    cfg.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    IntField field = new IntField("f", 0, Store.YES);
+    doc.add(field);
+    final int[] values = new int[_TestUtil.nextInt(random(), 1, 10)];
+    for (int i = 0; i < values.length; ++i) {
+      final int v;
+      switch (random().nextInt(10)) {
+        case 0:
+          v = Integer.MIN_VALUE;
+          break;
+        case 1:
+          v = 0;
+          break;
+        case 2:
+          v = Integer.MAX_VALUE;
+          break;
+        default:
+          v = _TestUtil.nextInt(random(), -10, 10);
+          break;
+      }
+      values[i] = v;
+      if (v == 0 && random().nextBoolean()) {
+        // missing
+        iw.addDocument(new Document());
+      } else {
+        field.setIntValue(v);
+        iw.addDocument(doc);
+      }
+    }
+    iw.forceMerge(1);
+    final DirectoryReader reader = iw.getReader();
+    final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(getOnlySegmentReader(reader), "f", false);
+    for (int i = 0; i < values.length; ++i) {
+      assertEquals(values[i], ints.get(i));
+    }
+    reader.close();
+    iw.close();
+    dir.close();
+  }
+
 }
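
Both new tests exist because the FieldCache of this era loads numeric fields into a packed-ints GrowableWriter that starts at a small bits-per-value and widens as larger values arrive; a growth bug would silently clip Long.MIN_VALUE/Long.MAX_VALUE, hence the explicit extreme values in the switch blocks above. A minimal sketch of that growth behavior (assuming the org.apache.lucene.util.packed API of Lucene 4.x):

    GrowableWriter w = new GrowableWriter(2, 10, PackedInts.DEFAULT); // 2 bits, 10 slots
    w.set(0, 3L);                    // fits in the initial 2 bits per value
    w.set(1, Long.MAX_VALUE);        // forces a resize to 63 bits per value
    assert w.get(0) == 3L;           // earlier values survive the growth
    assert w.get(1) == Long.MAX_VALUE;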
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
index b54983f..bc25b58 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
@@ -19,15 +19,17 @@
 
 import java.io.IOException;
 
-
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.NumericUtils;
 import org.junit.Test;
 
 /**
@@ -187,98 +189,6 @@
     assertEquals("max,nul,T,T", 1, result.length);
   }
   
-  // byte-ranges cannot be tested, because all ranges are too big for bytes, need an extra range for that
-
-  @Test
-  public void testFieldCacheRangeFilterShorts() throws IOException {
-
-    IndexReader reader = signedIndexReader;
-    IndexSearcher search = newSearcher(reader);
-
-    int numDocs = reader.numDocs();
-    int medId = ((maxId - minId) / 2);
-    Short minIdO = Short.valueOf((short) minId);
-    Short maxIdO = Short.valueOf((short) maxId);
-    Short medIdO = Short.valueOf((short) medId);
-        
-    assertEquals("num of docs", numDocs, 1+ maxId - minId);
-        
-    ScoreDoc[] result;
-    Query q = new TermQuery(new Term("body","body"));
-
-    // test id, bounded on both ends
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("all but last", numDocs-1, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("all but first", numDocs-1, result.length);
-        
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("all but ends", numDocs-2, result.length);
-    
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med and up", 1+ maxId-medId, result.length);
-        
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("up to med", 1+ medId-minId, result.length);
-    
-    // unbounded id
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,null,T,T), numDocs).scoreDocs;
-    assertEquals("find all", numDocs, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("min and up", numDocs, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
-    assertEquals("max and down", numDocs, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,null,F,F), numDocs).scoreDocs;
-    assertEquals("not min, but up", numDocs-1, result.length);
-        
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("not max, but down", numDocs-1, result.length);
-        
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
-    assertEquals("med and up, not max", maxId-medId, result.length);
-        
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
-    assertEquals("not min, up to med", medId-minId, result.length);
-
-    // very small sets
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
-    assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
-    assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
-    assertEquals("max,max,F,F", 0, result.length);
-                     
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,minIdO,F,T), numDocs).scoreDocs;
-    assertEquals("nul,min,F,T", 1, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
-    assertEquals("max,nul,T,T", 1, result.length);
-
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
-    assertEquals("med,med,T,T", 1, result.length);
-    
-    // special cases
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",Short.valueOf(Short.MAX_VALUE),null,F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",null,Short.valueOf(Short.MIN_VALUE),F,F), numDocs).scoreDocs;
-    assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
-    assertEquals("inverse range", 0, result.length);
-  }
-  
   @Test
   public void testFieldCacheRangeFilterInts() throws IOException {
 
@@ -298,75 +208,75 @@
 
     // test id, bounded on both ends
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,null,F,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,medIdO,F,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,minIdO,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,medIdO,F,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",minIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,minIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,minIdO,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",medIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",Integer.valueOf(Integer.MAX_VALUE),null,F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",null,Integer.valueOf(Integer.MIN_VALUE),F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",maxIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
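
The two "overflow special case" assertions pin down a boundary guard: FieldCacheRangeFilter normalizes exclusive bounds to inclusive ones by adding or subtracting one, and an exclusive lower bound of Integer.MAX_VALUE (or an exclusive upper bound of Integer.MIN_VALUE) cannot be tightened without wrapping around, so the filter must report an empty match instead. Sketched (an assumption about the internal check, not copied from the filter):

    // lower = includeLower ? lowerVal : lowerVal + 1;  // would wrap at MAX_VALUE
    if (!includeLower && lowerVal == Integer.MAX_VALUE) {
      return null;  // no document can match, so skip the FieldCache lookup entirely
    }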
   
@@ -389,75 +299,75 @@
 
     // test id, bounded on both ends
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("all but last", numDocs-1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("all but first", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("all but ends", numDocs-2, result.length);
     
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("med and up", 1+ maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("up to med", 1+ medId-minId, result.length);
     
     // unbounded id
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("min and up", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,maxIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,maxIdO,F,T), numDocs).scoreDocs;
     assertEquals("max and down", numDocs, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,null,F,F), numDocs).scoreDocs;
     assertEquals("not min, but up", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("not max, but down", numDocs-1, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,maxIdO,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,maxIdO,T,F), numDocs).scoreDocs;
     assertEquals("med and up, not max", maxId-medId, result.length);
         
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,medIdO,F,T), numDocs).scoreDocs;
     assertEquals("not min, up to med", medId-minId, result.length);
 
     // very small sets
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,minIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,minIdO,F,F), numDocs).scoreDocs;
     assertEquals("min,min,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,medIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,medIdO,F,F), numDocs).scoreDocs;
     assertEquals("med,med,F,F", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,maxIdO,F,F), numDocs).scoreDocs;
     assertEquals("max,max,F,F", 0, result.length);
                      
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",minIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("min,min,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,minIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,minIdO,F,T), numDocs).scoreDocs;
     assertEquals("nul,min,F,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("max,max,T,T", 1, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,null,T,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,null,T,F), numDocs).scoreDocs;
     assertEquals("max,nul,T,T", 1, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",medIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",medIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("med,med,T,T", 1, result.length);
     
     // special cases
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",Long.valueOf(Long.MAX_VALUE),null,F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",null,Long.valueOf(Long.MIN_VALUE),F,F), numDocs).scoreDocs;
     assertEquals("overflow special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id_long",maxIdO,minIdO,T,T), numDocs).scoreDocs;
     assertEquals("inverse range", 0, result.length);
   }
   
@@ -476,19 +386,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,medIdO,F,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",medIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",medIdO,null,F,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",Float.valueOf(Float.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newFloatRange("id_float",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -505,19 +415,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",minIdO,medIdO,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",minIdO,medIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs/2, result.length);
     int count = 0;
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null,medIdO,F,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null,medIdO,F,T), numDocs).scoreDocs;
     count += result.length;
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",medIdO,null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",medIdO,null,F,F), numDocs).scoreDocs;
     count += result.length;
     assertEquals("sum of two concenatted ranges", numDocs, count);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null,null,T,T), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null,null,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",Double.valueOf(Double.POSITIVE_INFINITY),null,F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
-    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id_double",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
     assertEquals("infinity special case", 0, result.length);
   }
   
@@ -529,13 +439,15 @@
 
     for (int d = -20; d <= 20; d++) {
       Document doc = new Document();
-      doc.add(newStringField("id", Integer.toString(d), Field.Store.NO));
+      doc.add(new IntField("id_int", d, Field.Store.NO));
       doc.add(newStringField("body", "body", Field.Store.NO));
       writer.addDocument(doc);
     }
     
     writer.forceMerge(1);
-    writer.deleteDocuments(new Term("id","0"));
+    BytesRef term0 = new BytesRef();
+    NumericUtils.intToPrefixCoded(0, 0, term0);
+    writer.deleteDocuments(new Term("id_int", term0));
     writer.close();
 
     IndexReader reader = DirectoryReader.open(dir);
@@ -545,19 +457,19 @@
     ScoreDoc[] result;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,20,T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",0,20,T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,0,T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",10,20,T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
 
-    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id_int",-20,-10,T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
     reader.close();
     dir.close();
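
Because the id field is now an IntField, its indexed terms are trie-encoded rather than the decimal strings newStringField produced, which is why the delete above had to switch from new Term("id", "0") to the shift-0 prefix-coded term. A delete-by-query is an equivalent alternative for a single value (hedged: same effect through a different API):

    // Exact-term delete, as in the hunk above:
    BytesRef bytes = new BytesRef();
    NumericUtils.intToPrefixCoded(0, 0, bytes);  // shift 0 = the full-precision term
    writer.deleteDocuments(new Term("id_int", bytes));
    // Delete-by-query alternative covering exactly the same value:
    writer.deleteDocuments(NumericRangeQuery.newIntRange("id_int", 0, 0, true, true));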
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index 2d294c7..57db46a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -37,7 +37,7 @@
 import org.apache.lucene.index.TermContext;
 import org.apache.lucene.search.BooleanQuery.BooleanWeight;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
-import org.apache.lucene.search.similarities.Similarity.ExactSimScorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity.SimWeight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -274,7 +274,7 @@
     final int maxDoc;
 
     final Set<Long> ords = new HashSet<Long>();
-    final ExactSimScorer[] sims;
+    final SimScorer[] sims;
     final int minNrShouldMatch;
     
     double score = Float.NaN;
@@ -285,7 +285,7 @@
       this.maxDoc = reader.maxDoc();
       BooleanQuery bq = (BooleanQuery) weight.getQuery();
       this.minNrShouldMatch = bq.getMinimumNumberShouldMatch();
-      this.sims = new ExactSimScorer[(int)dv.getValueCount()];
+      this.sims = new SimScorer[(int)dv.getValueCount()];
       for (BooleanClause clause : bq.getClauses()) {
         assert !clause.isProhibited();
         assert !clause.isRequired();
@@ -300,7 +300,7 @@
                         searcher.termStatistics(term, context));
           w.getValueForNormalization(); // ignored
           w.normalize(1F, 1F);
-          sims[(int)ord] = weight.similarity.exactSimScorer(w, reader.getContext());
+          sims[(int)ord] = weight.similarity.simScorer(w, reader.getContext());
         }
       }
     }
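
The rename follows the consolidation of the similarity scoring API: the separate ExactSimScorer (integer frequencies) and its sloppy counterpart were merged into a single Similarity.SimScorer whose score(doc, freq) takes a float, so one scorer type now covers both cases. A usage sketch matching the calls in this hunk (variable names are illustrative):

    SimWeight w = sim.computeWeight(1f,
        searcher.collectionStatistics("field"),
        searcher.termStatistics(term, termContext));
    w.getValueForNormalization();                  // value ignored, as in the test
    w.normalize(1f, 1f);
    Similarity.SimScorer scorer = sim.simScorer(w, readerContext);
    float score = scorer.score(docID, freq);       // freq is a float in both cases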
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
index a2a7643..e9d5338 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -41,7 +41,7 @@
     super.setUp();
     directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
-    //writer.setUseCompoundFile(false);
+    //writer.setNoCFSRatio(0.0);
     //writer.infoStream = System.out;
     FieldType customType = new FieldType(TextField.TYPE_STORED);
     customType.setTokenized(false);
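
The refreshed comment reflects that compound-file usage is configured as a ratio rather than a boolean in this era: 0.0 never writes .cfs files, 1.0 always allows them, and values in between gate on segment size. For example, on a merge policy that exposes the setter (hedged sketch; TieredMergePolicy has carried it for several releases):

    TieredMergePolicy mp = new TieredMergePolicy();
    mp.setNoCFSRatio(0.0);   // the modern spelling of setUseCompoundFile(false)
    iwc.setMergePolicy(mp);  // iwc: an IndexWriterConfig, as in setUp() above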
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
index 0bd23cb..7ae1f294 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java
@@ -111,8 +111,6 @@
     assertQuery(query, filter, Sort.INDEXORDER);
     for(int rev=0;rev<2;rev++) {
       boolean reversed = rev == 1;
-      assertQuery(query, filter, new Sort(new SortField[] {new SortField("byte", SortField.Type.BYTE, reversed)}));
-      assertQuery(query, filter, new Sort(new SortField[] {new SortField("short", SortField.Type.SHORT, reversed)}));
       assertQuery(query, filter, new Sort(new SortField[] {new SortField("int", SortField.Type.INT, reversed)}));
       assertQuery(query, filter, new Sort(new SortField[] {new SortField("long", SortField.Type.LONG, reversed)}));
       assertQuery(query, filter, new Sort(new SortField[] {new SortField("float", SortField.Type.FLOAT, reversed)}));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
index a657363..6589f5a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
@@ -219,7 +219,7 @@
                 //sort = new Sort(SortField.FIELD_DOC);
                 sort = null;
               } else if (what == 2) {
-                sort = new Sort(new SortField[] {new SortField("docid", SortField.Type.INT, random().nextBoolean())});
+                sort = new Sort(new SortField[] {new SortField("docid_int", SortField.Type.INT, random().nextBoolean())});
               } else {
                 sort = new Sort(new SortField[] {new SortField("title", SortField.Type.STRING, random().nextBoolean())});
               }
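
The sort field rename suggests the test now indexes the doc id twice, keeping the string field and adding a numeric twin so the INT sort reads a trie-encoded field. A hedged sketch of the indexing side this implies (the exact field setup lives outside this hunk):

    doc.add(new IntField("docid_int", docID, Field.Store.NO));  // numeric twin of "docid"
    Sort sort = new Sort(new SortField("docid_int", SortField.Type.INT, reverse));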
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
index d3d3a1d..3014f72 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
@@ -108,6 +108,16 @@
   private class Sim1 extends TFIDFSimilarity {
     
     @Override
+    public long encodeNormValue(float f) {
+      return (long) f;
+    }
+    
+    @Override
+    public float decodeNormValue(long norm) {
+      return norm;
+    }
+    
+    @Override
     public float coord(int overlap, int maxOverlap) {
       return 1f;
     }
@@ -146,6 +156,16 @@
   private class Sim2 extends TFIDFSimilarity {
     
     @Override
+    public long encodeNormValue(float f) {
+      return (long) f;
+    }
+    
+    @Override
+    public float decodeNormValue(long norm) {
+      return norm;
+    }
+    
+    @Override
     public float coord(int overlap, int maxOverlap) {
       return 1f;
     }
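
Both test similarities gain these overrides because norms are now surfaced as longs, and the stock TFIDFSimilarity compresses each norm into a single byte via SmallFloat, which is lossy; the tests need the raw value to round-trip exactly, so they simply cast. The default encoding, roughly (SmallFloat is a real org.apache.lucene.util class; the exact default may vary by version):

    byte b = SmallFloat.floatToByte315(0.7f);  // lossy 8-bit norm encoding
    float back = SmallFloat.byte315ToFloat(b); // only approximately 0.7f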
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSort.java b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
index 6c0c269..7b8ef10 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSort.java
@@ -24,7 +24,11 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
@@ -329,259 +333,19 @@
     ir.close();
     dir.close();
   }
-  
-  /** Tests sorting on type byte */
-  public void testByte() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "23", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
 
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("23", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type byte with a missing value */
-  public void testByteMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null value is treated as a 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type byte, specifying the missing value should be treated as Byte.MAX_VALUE */
-  public void testByteMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.BYTE);
-    sortField.setMissingValue(Byte.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null value is treated as Byte.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type byte in reverse */
-  public void testByteReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "23", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.BYTE, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("23", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short */
-  public void testShort() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "300", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("300", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short with a missing value */
-  public void testShortMissing() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as a 0
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short, specifying the missing value should be treated as Short.MAX_VALUE */
-  public void testShortMissingLast() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    SortField sortField = new SortField("value", SortField.Type.SHORT);
-    sortField.setMissingValue(Short.MAX_VALUE);
-    Sort sort = new Sort(sortField);
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // null is treated as Short.MAX_VALUE
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short in reverse */
-  public void testShortReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("value", "300", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.SHORT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("300", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests sorting on type int */
   public void testInt() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "300000", Field.Store.YES));
+    doc.add(new IntField("value", 300000, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new IntField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new IntField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
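
With the byte and short variants gone, the surviving int/long/float/double tests keep exercising the same three behaviors: natural numeric order, missing values sorting as 0 by default, and setMissingValue(...) pushing them last. The int case, condensed (mirrors the pattern of the removed byte/short tests):

    SortField sf = new SortField("value", SortField.Type.INT);
    sf.setMissingValue(Integer.MAX_VALUE);  // docs without the field sort last
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sf));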
@@ -607,10 +371,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new IntField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new IntField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -636,10 +400,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new IntField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new IntField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -665,13 +429,13 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "300000", Field.Store.YES));
+    doc.add(new IntField("value", 300000, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new IntField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new IntField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -695,13 +459,13 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
+    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new LongField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new LongField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -727,10 +491,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new LongField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new LongField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -756,10 +520,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new LongField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new LongField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -785,13 +549,13 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "3000000000", Field.Store.YES));
+    doc.add(new LongField("value", 3000000000L, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1", Field.Store.YES));
+    doc.add(new LongField("value", -1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4", Field.Store.YES));
+    doc.add(new LongField("value", 4, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -815,13 +579,13 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -847,10 +611,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -876,10 +640,10 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -905,13 +669,13 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    doc.add(new FloatField("value", 30.1f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new FloatField("value", -1.3f, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2", Field.Store.YES));
+    doc.add(new FloatField("value", 4.2f, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -935,16 +699,16 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -969,10 +733,10 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "+0", Field.Store.YES));
+    doc.add(new DoubleField("value", +0d, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-0", Field.Store.YES));
+    doc.add(new DoubleField("value", -0d, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
     IndexReader ir = writer.getReader();
@@ -984,8 +748,13 @@
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(2, td.totalHits);
     // numeric order
-    assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
+    double v0 = searcher.doc(td.scoreDocs[0].doc).getField("value").numericValue().doubleValue();
+    double v1 = searcher.doc(td.scoreDocs[1].doc).getField("value").numericValue().doubleValue();
+    assertEquals(0, v0, 0d);
+    assertEquals(0, v1, 0d);
+    // check sign bits
+    assertEquals(1, Double.doubleToLongBits(v0) >>> 63);
+    assertEquals(0, Double.doubleToLongBits(v1) >>> 63);
 
     ir.close();
     dir.close();
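
(Illustrative, not part of the patch.) The rewritten assertions above distinguish -0d from +0d through the IEEE 754 sign bit, because the two values compare equal as doubles. In isolation:

    double neg = -0d, pos = +0d;
    assertTrue(neg == pos);                                // numerically equal
    assertEquals(1, Double.doubleToLongBits(neg) >>> 63);  // sign bit set for -0.0
    assertEquals(0, Double.doubleToLongBits(pos) >>> 63);  // sign bit clear for +0.0
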
@@ -998,13 +767,13 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -1031,13 +800,13 @@
     Document doc = new Document();
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -1064,16 +833,16 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     Document doc = new Document();
-    doc.add(newStringField("value", "30.1", Field.Store.YES));
+    doc.add(new DoubleField("value", 30.1, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "-1.3", Field.Store.YES));
+    doc.add(new DoubleField("value", -1.3, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
+    doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
     writer.addDocument(doc);
     IndexReader ir = writer.getReader();
     writer.close();
@@ -1150,7 +919,7 @@
     for(int seg=0;seg<2;seg++) {
       for(int docIDX=0;docIDX<10;docIDX++) {
         Document doc = new Document();
-        doc.add(newStringField("id", ""+docIDX, Field.Store.YES));
+        doc.add(new IntField("id", docIDX, Field.Store.YES));
         StringBuilder sb = new StringBuilder();
         for(int i=0;i<id;i++) {
           sb.append(' ');
@@ -1251,94 +1020,6 @@
   }
   
   /** 
-   * test sorts for a custom byte parser that uses a simple char encoding 
-   */
-  public void testCustomByteParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.ByteParser() {
-      @Override
-      public byte parseByte(BytesRef term) {
-        return (byte) (term.bytes[term.offset]-'A');
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** 
-   * test sorts for a custom short parser that uses a simple char encoding 
-   */
-  public void testCustomShortParser() throws Exception {
-    List<String> letters = Arrays.asList(new String[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" });
-    Collections.shuffle(letters, random());
-
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    for (String letter : letters) {
-      Document doc = new Document();
-      doc.add(newStringField("parser", letter, Field.Store.YES));
-      iw.addDocument(doc);
-    }
-    
-    IndexReader ir = iw.getReader();
-    iw.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("parser", new FieldCache.ShortParser() {
-      @Override
-      public short parseShort(BytesRef term) {
-        return (short) (term.bytes[term.offset]-'A');
-      }
-      
-      @Override
-      public TermsEnum termsEnum(Terms terms) throws IOException {
-        return terms.iterator(null);
-      }
-    }), SortField.FIELD_DOC );
-    
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-
-    // results should be in alphabetical order
-    assertEquals(10, td.totalHits);
-    Collections.sort(letters);
-    for (int i = 0; i < letters.size(); i++) {
-      assertEquals(letters.get(i), searcher.doc(td.scoreDocs[i].doc).get("parser"));
-    }
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** 
    * test sorts for a custom long parser that uses a simple char encoding 
    */
   public void testCustomLongParser() throws Exception {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java b/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java
index 595b857..49f954d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortDocValues.java
@@ -223,142 +223,6 @@
     dir.close();
   }
   
-  /** Tests sorting on type byte */
-  public void testByte() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 23));
-    doc.add(newStringField("value", "23", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("23", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type byte in reverse */
-  public void testByteReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 23));
-    doc.add(newStringField("value", "23", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.BYTE, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("23", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short */
-  public void testShort() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300));
-    doc.add(newStringField("value", "300", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // numeric order
-    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("300", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
-  /** Tests sorting on type short in reverse */
-  public void testShortReverse() throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new NumericDocValuesField("value", 300));
-    doc.add(newStringField("value", "300", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", -1));
-    doc.add(newStringField("value", "-1", Field.Store.YES));
-    writer.addDocument(doc);
-    doc = new Document();
-    doc.add(new NumericDocValuesField("value", 4));
-    doc.add(newStringField("value", "4", Field.Store.YES));
-    writer.addDocument(doc);
-    IndexReader ir = writer.getReader();
-    writer.close();
-    
-    IndexSearcher searcher = newSearcher(ir);
-    Sort sort = new Sort(new SortField("value", SortField.Type.SHORT, true));
-
-    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
-    assertEquals(3, td.totalHits);
-    // reverse numeric order
-    assertEquals("300", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNoFieldCaches();
-
-    ir.close();
-    dir.close();
-  }
-  
   /** Tests sorting on type int */
   public void testInt() throws IOException {
     Directory dir = newDirectory();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
index ebef3bd..23d8a54 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
@@ -50,7 +50,7 @@
   public static void beforeClass() throws Exception {                  
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()));
-    //writer.setUseCompoundFile(true);
+    //writer.setNoCFSRatio(1.0);
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
index 9cb4376..177874f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
@@ -148,7 +148,7 @@
   static final class JustCompileSpanScorer extends SpanScorer {
 
     protected JustCompileSpanScorer(Spans spans, Weight weight,
-        Similarity.SloppySimScorer docScorer) throws IOException {
+        Similarity.SimScorer docScorer) throws IOException {
       super(spans, weight, docScorer);
     }
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
index 40172ba..f4b3535 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
@@ -379,11 +379,11 @@
     PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext());
     
     Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
-    if(VERBOSE)
+    if(VERBOSE) {
       System.out.println("Num payloads:" + payloads.size());
-    for (final byte [] bytes : payloads) {
-      if(VERBOSE)
+      for (final byte [] bytes : payloads) {
         System.out.println(new String(bytes, "UTF-8"));
+      }
     }
     reader.close();
     directory.close();
@@ -451,12 +451,12 @@
         System.out.println("\nSpans Dump --");
       if (spans.isPayloadAvailable()) {
         Collection<byte[]> payload = spans.getPayload();
-        if(VERBOSE)
+        if(VERBOSE) {
           System.out.println("payloads for span:" + payload.size());
-        for (final byte [] bytes : payload) {
-          if(VERBOSE)
+          for (final byte [] bytes : payload) {
             System.out.println("doc:" + spans.doc() + " s:" + spans.start() + " e:" + spans.end() + " "
               + new String(bytes, "UTF-8"));
+          }
         }
 
         assertEquals(numPayloads[cnt],payload.size());
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
index 540362d..9e779f2 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 
 import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
@@ -98,7 +99,7 @@
              try {
               IndexInput input = dir.openInput(file, newIOContext(random()));
               input.close();
-              } catch (FileNotFoundException e) {
+              } catch (FileNotFoundException | NoSuchFileException e) {
                 // ignore
               } catch (IOException e) {
                 if (e.getMessage().contains("still open for writing")) {
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
index c18d31e..044ca31 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
@@ -54,7 +54,7 @@
     IndexWriter writer = new IndexWriter(
         fsd,
         new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
-            setMergePolicy(newLogMergePolicy(false)).setCodec(Codec.forName("Lucene40"))
+            setMergePolicy(newLogMergePolicy(false)).setCodec(Codec.forName("Lucene40")).setUseCompoundFile(false)
     );
     TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
     IndexReader reader = DirectoryReader.open(writer, true);
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java b/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
index fbfbded..895eaa6 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
@@ -16,21 +16,25 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
 import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;
 
-import java.io.IOException;
-
 public class TestFieldCacheSanityChecker extends LuceneTestCase {
 
   protected AtomicReader readerA;
@@ -51,18 +55,14 @@
 
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
-    byte theByte = Byte.MAX_VALUE;
-    short theShort = Short.MAX_VALUE;
     int theInt = Integer.MAX_VALUE;
     float theFloat = Float.MAX_VALUE;
     for (int i = 0; i < NUM_DOCS; i++){
       Document doc = new Document();
-      doc.add(newStringField("theLong", String.valueOf(theLong--), Field.Store.NO));
-      doc.add(newStringField("theDouble", String.valueOf(theDouble--), Field.Store.NO));
-      doc.add(newStringField("theByte", String.valueOf(theByte--), Field.Store.NO));
-      doc.add(newStringField("theShort", String.valueOf(theShort--), Field.Store.NO));
-      doc.add(newStringField("theInt", String.valueOf(theInt--), Field.Store.NO));
-      doc.add(newStringField("theFloat", String.valueOf(theFloat--), Field.Store.NO));
+      doc.add(new LongField("theLong", theLong--, Field.Store.NO));
+      doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
+      doc.add(new IntField("theInt", theInt--, Field.Store.NO));
+      doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
       if (0 == i % 3) {
         wA.addDocument(doc);
       } else {
@@ -95,12 +95,12 @@
     cache.purgeAllCaches();
 
     cache.getDoubles(readerA, "theDouble", false);
-    cache.getDoubles(readerA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
-    cache.getDoubles(readerAclone, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
-    cache.getDoubles(readerB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, false);
+    cache.getDoubles(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getDoubles(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
+    cache.getDoubles(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
 
     cache.getInts(readerX, "theInt", false);
-    cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER, false);
+    cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
 
     // // // 
 
@@ -119,9 +119,8 @@
     FieldCache cache = FieldCache.DEFAULT;
     cache.purgeAllCaches();
 
-    cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER, false);
+    cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
     cache.getTerms(readerX, "theInt");
-    cache.getBytes(readerX, "theByte", false);
 
     // // // 
 
@@ -147,8 +146,6 @@
     cache.getTerms(readerB, "theInt");
     cache.getTerms(readerX, "theInt");
 
-    cache.getBytes(readerX, "theByte", false);
-
 
     // // // 
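
(Illustrative, not part of the patch.) The parser swap above is forced by the field migration: DEFAULT_INT_PARSER and friends parse the term's string form, which no longer exists once "theInt" is indexed with new IntField(...); the NUMERIC_UTILS_* parsers decode the trie-encoded terms instead. The pairing, mirroring the test's own calls:

    // the parser must match the field's on-disk encoding,
    // otherwise decoding fails at cache-load time:
    FieldCache cache = FieldCache.DEFAULT;
    cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
    cache.getDoubles(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
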
 
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java
index 701e921..a149ed6 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java
@@ -34,7 +34,7 @@
 import org.junit.Ignore;
 import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
 
-@Ignore("Requires tons of heap to run (10G works)")
+@Ignore("Requires tons of heap to run (420G works)")
 @TimeoutSuite(millis = 100 * TimeUnits.HOUR)
 public class Test2BFST extends LuceneTestCase {
 
@@ -50,12 +50,12 @@
     for(int doPackIter=0;doPackIter<2;doPackIter++) {
       boolean doPack = doPackIter == 1;
 
-      // Build FST w/ NoOutputs and stop when nodeCount > 3B
+      // Build FST w/ NoOutputs and stop when nodeCount > 2.2B
       if (!doPack) {
         System.out.println("\nTEST: 3B nodes; doPack=false output=NO_OUTPUTS");
         Outputs<Object> outputs = NoOutputs.getSingleton();
         Object NO_OUTPUT = outputs.getNoOutput();
-        final Builder<Object> b = new Builder<Object>(FST.INPUT_TYPE.BYTE1, 0, 0, false, false, Integer.MAX_VALUE, outputs,
+        final Builder<Object> b = new Builder<Object>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs,
                                                       null, doPack, PackedInts.COMPACT, true, 15);
 
         int count = 0;
@@ -72,7 +72,7 @@
           if (count % 100000 == 0) {
             System.out.println(count + ": " + b.fstSizeInBytes() + " bytes; " + b.getTotStateCount() + " nodes");
           }
-          if (b.getTotStateCount() > LIMIT) {
+          if (b.getTotStateCount() > Integer.MAX_VALUE + 100L * 1024 * 1024) {
             break;
           }
           nextInput(r, ints2);
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index fd7e8ac..fe21e0a 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -126,7 +126,7 @@
 
       // FST ord pos int
       {
-        final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+        final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
         final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms2.length);
         for(int idx=0;idx<terms2.length;idx++) {
           pairs.add(new FSTTester.InputOutput<Long>(terms2[idx], (long) idx));
@@ -171,7 +171,7 @@
 
     // PositiveIntOutput (ord)
     {
-      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
       final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
       for(int idx=0;idx<terms.length;idx++) {
         pairs.add(new FSTTester.InputOutput<Long>(terms[idx], (long) idx));
@@ -181,8 +181,7 @@
 
     // PositiveIntOutput (random monotonically increasing positive number)
     {
-      final boolean doShare = random().nextBoolean();
-      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(doShare);
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
       final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
       long lastOutput = 0;
       for(int idx=0;idx<terms.length;idx++) {
@@ -190,12 +189,12 @@
         lastOutput = value;
         pairs.add(new FSTTester.InputOutput<Long>(terms[idx], value));
       }
-      new FSTTester<Long>(random(), dir, inputMode, pairs, outputs, doShare).doTest(true);
+      new FSTTester<Long>(random(), dir, inputMode, pairs, outputs, true).doTest(true);
     }
 
     // PositiveIntOutput (random positive number)
     {
-      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random().nextBoolean());
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
       final List<FSTTester.InputOutput<Long>> pairs = new ArrayList<FSTTester.InputOutput<Long>>(terms.length);
       for(int idx=0;idx<terms.length;idx++) {
         pairs.add(new FSTTester.InputOutput<Long>(terms[idx], _TestUtil.nextLong(random(), 0, Long.MAX_VALUE)));
@@ -205,8 +204,8 @@
 
     // Pair<ord, (random monotonically increasing positive number>
     {
-      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(random().nextBoolean());
-      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(random().nextBoolean());
+      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton();
+      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton();
       final PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(o1, o2);
       final List<FSTTester.InputOutput<PairOutputs.Pair<Long,Long>>> pairs = new ArrayList<FSTTester.InputOutput<PairOutputs.Pair<Long,Long>>>(terms.length);
       long lastOutput = 0;
@@ -306,7 +305,7 @@
     }
     IndexReader r = DirectoryReader.open(writer, true);
     writer.close();
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random().nextBoolean());
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
 
     final boolean doRewrite = random().nextBoolean();
 
@@ -653,8 +652,8 @@
 
     if (storeOrds && storeDocFreqs) {
       // Store both ord & docFreq:
-      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(true);
-      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(false);
+      final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton();
+      final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton();
       final PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(o1, o2);
       new VisitTerms<PairOutputs.Pair<Long,Long>>(dirOut, wordsFileIn, inputMode, prune, outputs, doPack, noArcArrays) {
         Random rand;
@@ -669,7 +668,7 @@
       }.run(limit, verify, false);
     } else if (storeOrds) {
       // Store only ords
-      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
       new VisitTerms<Long>(dirOut, wordsFileIn, inputMode, prune, outputs, doPack, noArcArrays) {
         @Override
         public Long getOutput(IntsRef input, int ord) {
@@ -678,7 +677,7 @@
       }.run(limit, verify, true);
     } else if (storeDocFreqs) {
       // Store only docFreq
-      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(false);
+      final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
       new VisitTerms<Long>(dirOut, wordsFileIn, inputMode, prune, outputs, doPack, noArcArrays) {
         Random rand;
         @Override
@@ -781,7 +780,7 @@
     // smaller FST if the outputs grow monotonically.  But
     // if numbers are "random", false should give smaller
     // final size:
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
 
     // Build an FST mapping BytesRef -> Long
     final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
@@ -1100,7 +1099,7 @@
   }
 
   public void testFinalOutputOnEndState() throws Exception {
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
 
     final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE4, 2, 0, true, true, Integer.MAX_VALUE, outputs, null, random().nextBoolean(), PackedInts.DEFAULT, true, 15);
     builder.add(Util.toUTF32("stat", new IntsRef()), 17L);
@@ -1115,7 +1114,7 @@
   }
 
   public void testInternalFinalState() throws Exception {
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     final boolean willRewrite = random().nextBoolean();
     final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, willRewrite, PackedInts.DEFAULT, true, 15);
     builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRef()), outputs.getNoOutput());
@@ -1136,7 +1135,7 @@
   // Make sure raw FST can differentiate between final vs
   // non-final end nodes
   public void testNonFinalStopNode() throws Exception {
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     final Long nothing = outputs.getNoOutput();
     final Builder<Long> b = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
 
@@ -1216,7 +1215,7 @@
   };
 
   public void testShortestPaths() throws Exception {
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
 
     final IntsRef scratch = new IntsRef();
@@ -1258,8 +1257,8 @@
   public void testShortestPathsWFST() throws Exception {
 
     PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(
-        PositiveIntOutputs.getSingleton(true), // weight
-        PositiveIntOutputs.getSingleton(true)  // output
+        PositiveIntOutputs.getSingleton(), // weight
+        PositiveIntOutputs.getSingleton()  // output
     );
     
     final Builder<Pair<Long,Long>> builder = new Builder<Pair<Long,Long>>(FST.INPUT_TYPE.BYTE1, outputs);
@@ -1301,7 +1300,7 @@
     final TreeMap<String,Long> slowCompletor = new TreeMap<String,Long>();
     final TreeSet<String> allPrefixes = new TreeSet<String>();
     
-    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     final Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
     final IntsRef scratch = new IntsRef();
     
@@ -1416,8 +1415,8 @@
     final TreeSet<String> allPrefixes = new TreeSet<String>();
     
     PairOutputs<Long,Long> outputs = new PairOutputs<Long,Long>(
-        PositiveIntOutputs.getSingleton(true), // weight
-        PositiveIntOutputs.getSingleton(true)  // output
+        PositiveIntOutputs.getSingleton(), // weight
+        PositiveIntOutputs.getSingleton()  // output
     );
     final Builder<Pair<Long,Long>> builder = new Builder<Pair<Long,Long>>(FST.INPUT_TYPE.BYTE1, outputs);
     final IntsRef scratch = new IntsRef();
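
(Illustrative, not part of the patch.) Every getSingleton(true/false) above collapses to the no-arg getSingleton(): the output-sharing flag was removed from PositiveIntOutputs, leaving a single shared instance. A minimal FST built against the new signature, using the same Lucene 4.x fst classes this test imports (keys must be added in sorted order):

    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
    IntsRef scratch = new IntsRef();
    builder.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L);   // sorted input order
    builder.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
    FST<Long> fst = builder.finish();
    assertEquals(7L, Util.get(fst, new BytesRef("dog")).longValue());
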
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java
index 202e9fe..20c8e47 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestFailOnFieldCacheInsanity.java
@@ -69,6 +69,7 @@
     for(Failure f : r.getFailures()) {
       if (f.getMessage().indexOf("Insane") != -1) {
         insane = true;
+        break;
       }
     }
     Assert.assertTrue(insane);
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessageWithRepeated.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessageWithRepeated.java
new file mode 100644
index 0000000..8b07c6a
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessageWithRepeated.java
@@ -0,0 +1,53 @@
+package org.apache.lucene.util.junitcompat;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.JUnitCore;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+
+/**
+ * Tests that the reproduce message is correct with the {@link Repeat} annotation.
+ */
+public class TestReproduceMessageWithRepeated extends WithNestedTests {
+  public static class Nested extends AbstractNestedTest {
+    @Test
+    @Repeat(iterations = 10)
+    public void testMe() {
+      throw new RuntimeException("bad");
+    }
+  }
+
+  public TestReproduceMessageWithRepeated() {
+    super(true);
+  }
+
+  @Test
+  public void testRepeatedMessage() throws Exception { 
+    String syserr = runAndReturnSyserr();
+    Assert.assertTrue(syserr.contains(" -Dtests.method=testMe "));
+  }
+
+  private String runAndReturnSyserr() {
+    JUnitCore.runClasses(Nested.class);
+    String err = getSysErr();
+    return err;
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
index 10e7b01..c60ce16 100644
--- a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
+++ b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java
@@ -650,13 +650,133 @@
     wrt.set(99, (1 << 23) - 1);
     assertEquals(1 << 10, wrt.get(valueCount - 1));
     wrt.set(1, Long.MAX_VALUE);
+    wrt.set(2, -3);
+    assertEquals(64, wrt.getBitsPerValue());
     assertEquals(1 << 10, wrt.get(valueCount - 1));
     assertEquals(Long.MAX_VALUE, wrt.get(1));
+    assertEquals(-3L, wrt.get(2));
     assertEquals(2, wrt.get(4));
     assertEquals((1 << 23) - 1, wrt.get(99));
     assertEquals(10, wrt.get(7));
     assertEquals(99, wrt.get(valueCount - 10));
     assertEquals(1 << 10, wrt.get(valueCount - 1));
+    assertEquals(RamUsageEstimator.sizeOf(wrt), wrt.ramBytesUsed());
+  }
+
+  public void testPagedGrowableWriter() {
+    int pageSize = 1 << (_TestUtil.nextInt(random(), 6, 30));
+    // supports 0 values?
+    PagedGrowableWriter writer = new PagedGrowableWriter(0, pageSize, _TestUtil.nextInt(random(), 1, 64), random().nextFloat());
+    assertEquals(0, writer.size());
+
+    // compare against AppendingLongBuffer
+    AppendingLongBuffer buf = new AppendingLongBuffer();
+    int size = random().nextInt(1000000);
+    long max = 5;
+    for (int i = 0; i < size; ++i) {
+      buf.add(_TestUtil.nextLong(random(), 0, max));
+      if (rarely()) {
+        max = PackedInts.maxValue(rarely() ? _TestUtil.nextInt(random(), 0, 63) : _TestUtil.nextInt(random(), 0, 31));
+      }
+    }
+    writer = new PagedGrowableWriter(size, pageSize, _TestUtil.nextInt(random(), 1, 64), random().nextFloat());
+    assertEquals(size, writer.size());
+    for (int i = size - 1; i >= 0; --i) {
+      writer.set(i, buf.get(i));
+    }
+    for (int i = 0; i < size; ++i) {
+      assertEquals(buf.get(i), writer.get(i));
+    }
+
+    // test ramBytesUsed
+    assertEquals(RamUsageEstimator.sizeOf(writer), writer.ramBytesUsed(), 8);
+
+    // test copy
+    PagedGrowableWriter copy = writer.resize(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2));
+    for (long i = 0; i < copy.size(); ++i) {
+      if (i < writer.size()) {
+        assertEquals(writer.get(i), copy.get(i));
+      } else {
+        assertEquals(0, copy.get(i));
+      }
+    }
+
+    // test grow
+    PagedGrowableWriter grow = writer.grow(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2));
+    for (long i = 0; i < grow.size(); ++i) {
+      if (i < writer.size()) {
+        assertEquals(writer.get(i), grow.get(i));
+      } else {
+        assertEquals(0, grow.get(i));
+      }
+    }
+  }
+
+  public void testPagedMutable() {
+    final int bitsPerValue = _TestUtil.nextInt(random(), 1, 64);
+    final long max = PackedInts.maxValue(bitsPerValue);
+    int pageSize = 1 << (_TestUtil.nextInt(random(), 6, 30));
+    // supports 0 values?
+    PagedMutable writer = new PagedMutable(0, pageSize, bitsPerValue, random().nextFloat() / 2);
+    assertEquals(0, writer.size());
+
+    // compare against AppendingLongBuffer
+    AppendingLongBuffer buf = new AppendingLongBuffer();
+    int size = random().nextInt(1000000);
+    
+    for (int i = 0; i < size; ++i) {
+      buf.add(bitsPerValue == 64 ? random().nextLong() : _TestUtil.nextLong(random(), 0, max));
+    }
+    writer = new PagedMutable(size, pageSize, bitsPerValue, random().nextFloat());
+    assertEquals(size, writer.size());
+    for (int i = size - 1; i >= 0; --i) {
+      writer.set(i, buf.get(i));
+    }
+    for (int i = 0; i < size; ++i) {
+      assertEquals(buf.get(i), writer.get(i));
+    }
+
+    // test ramBytesUsed
+    assertEquals(RamUsageEstimator.sizeOf(writer) - RamUsageEstimator.sizeOf(writer.format), writer.ramBytesUsed());
+
+    // test copy
+    PagedMutable copy = writer.resize(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2));
+    for (long i = 0; i < copy.size(); ++i) {
+      if (i < writer.size()) {
+        assertEquals(writer.get(i), copy.get(i));
+      } else {
+        assertEquals(0, copy.get(i));
+      }
+    }
+
+    // test grow
+    PagedMutable grow = writer.grow(_TestUtil.nextLong(random(), writer.size() / 2, writer.size() * 3 / 2));
+    for (long i = 0; i < grow.size(); ++i) {
+      if (i < writer.size()) {
+        assertEquals(writer.get(i), grow.get(i));
+      } else {
+        assertEquals(0, grow.get(i));
+      }
+    }
+  }
+
+  // memory hole
+  @Ignore
+  public void testPagedGrowableWriterOverflow() {
+    final long size = _TestUtil.nextLong(random(), 2 * (long) Integer.MAX_VALUE, 3 * (long) Integer.MAX_VALUE);
+    final int pageSize = 1 << (_TestUtil.nextInt(random(), 16, 30));
+    final PagedGrowableWriter writer = new PagedGrowableWriter(size, pageSize, 1, random().nextFloat());
+    final long index = _TestUtil.nextLong(random(), (long) Integer.MAX_VALUE, size - 1);
+    writer.set(index, 2);
+    assertEquals(2, writer.get(index));
+    for (int i = 0; i < 1000000; ++i) {
+      final long idx = _TestUtil.nextLong(random(), 0, size);
+      if (idx == index) {
+        assertEquals(2, writer.get(idx));
+      } else {
+        assertEquals(0, writer.get(idx));
+      }
+    }
   }
 
   public void testSave() throws IOException {
@@ -808,13 +928,15 @@
     final long[] arr = new long[RandomInts.randomIntBetween(random(), 1, 1000000)];
     for (int bpv : new int[] {0, 1, 63, 64, RandomInts.randomIntBetween(random(), 2, 62)}) {
       for (boolean monotonic : new boolean[] {true, false}) {
+        final int pageSize = 1 << _TestUtil.nextInt(random(), 6, 20);
+        final int initialPageCount = _TestUtil.nextInt(random(), 0, 16);
         AbstractAppendingLongBuffer buf;
         final int inc;
         if (monotonic) {
-          buf = new MonotonicAppendingLongBuffer();
+          buf = new MonotonicAppendingLongBuffer(initialPageCount, pageSize);
           inc = _TestUtil.nextInt(random(), -1000, 1000);
         } else {
-          buf = new AppendingLongBuffer();
+          buf = new AppendingLongBuffer(initialPageCount, pageSize);
           inc = 0;
         }
         if (bpv == 0) {
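
(Illustrative, not part of the patch.) The new tests above cover PagedGrowableWriter and PagedMutable, paged packed-ints structures whose pages widen their bits-per-value on demand, plus the new page-sized constructors for the appending buffers. Basic usage, matching the constructor shapes the tests exercise:

    // size, pageSize (power of two), startBitsPerValue, acceptableOverheadRatio
    PagedGrowableWriter w = new PagedGrowableWriter(1000000, 1 << 16, 8, 0.2f);
    w.set(12345, 42);                          // a page widens itself if the value needs more bits
    long v = w.get(12345);                     // -> 42
    PagedGrowableWriter w2 = w.grow(2000000);  // copy into a larger writer, zero-padded
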
diff --git a/lucene/demo/build.xml b/lucene/demo/build.xml
index b1cd4b4..074601a 100644
--- a/lucene/demo/build.xml
+++ b/lucene/demo/build.xml
@@ -58,7 +58,7 @@
     <echo>Compiling XML QueryParser Demo WAR</echo>
 
     <war destfile="${build.dir}/lucene-xml-query-demo.war" webxml="src/resources/org/apache/lucene/demo/xmlparser/WEB-INF/web.xml">
-      <fileset dir="src/resources/org/apache/lucene/demo/xmlparser/"/>
+      <fileset dir="${resources.dir}/org/apache/lucene/demo/xmlparser/"/>
       <lib dir="${build.dir}">
         <include name="${final.name}.jar"/>
       </lib>
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
index 5cdf33e..fffcb77 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
@@ -95,6 +95,10 @@
         int doc = 0;
         while (doc < length && (doc = hits.bits.nextSetBit(doc)) != -1) {
           long v = ndv.get(doc);
+          // TODO: if all ranges are non-overlapping, we
+          // should instead do a bin-search up front
+          // (really, a specialized case of the interval
+          // tree)
           // TODO: use interval tree instead of linear search:
           for(int j=0;j<ranges.ranges.length;j++) {
             if (ranges.ranges[j].accept(v)) {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
index c409180..f3318ed 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
@@ -208,9 +208,9 @@
           requests.add(fr);
         }
       }
-      if (requests.isEmpty()) {
-        throw new IllegalArgumentException("could not find FacetRequest for drill-sideways dimension \"" + dim + "\"");
-      }
+      // We already moved all drill-downs that didn't have a
+      // FacetRequest, in moveDrillDownOnlyClauses above:
+      assert !requests.isEmpty();
       drillSidewaysCollectors[idx++] = FacetsCollector.create(getDrillSidewaysAccumulator(dim, new FacetSearchParams(fsp.indexingParams, requests)));
     }
 
@@ -402,8 +402,13 @@
       query = new DrillDownQuery(filter, query);
     }
     if (sort != null) {
+      int limit = searcher.getIndexReader().maxDoc();
+      if (limit == 0) {
+        limit = 1; // the collector does not allow numHits = 0
+      }
+      topN = Math.min(topN, limit);
       final TopFieldCollector hitCollector = TopFieldCollector.create(sort,
-                                                                      Math.min(topN, searcher.getIndexReader().maxDoc()),
+                                                                      topN,
                                                                       after,
                                                                       true,
                                                                       doDocScores,
@@ -422,7 +427,12 @@
    */
   public DrillSidewaysResult search(ScoreDoc after,
                                     DrillDownQuery query, int topN, FacetSearchParams fsp) throws IOException {
-    TopScoreDocCollector hitCollector = TopScoreDocCollector.create(Math.min(topN, searcher.getIndexReader().maxDoc()), after, true);
+    int limit = searcher.getIndexReader().maxDoc();
+    if (limit == 0) {
+      limit = 1; // the collector does not allow numHits = 0
+    }
+    topN = Math.min(topN, limit);
+    TopScoreDocCollector hitCollector = TopScoreDocCollector.create(topN, after, true);
     DrillSidewaysResult r = search(query, hitCollector, fsp);
     return new DrillSidewaysResult(r.facetResults, hitCollector.topDocs());
   }
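
(Illustrative, not part of the patch.) The clamp above exists because the TopDocs collectors reject numHits = 0, which is exactly what Math.min(topN, maxDoc()) produced on an empty index:

    // before the fix, an empty reader (maxDoc() == 0) ended up here:
    TopScoreDocCollector.create(0, null, true);  // throws IllegalArgumentException: numHits must be > 0
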
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
index afa0906..fd5d160 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysQuery.java
@@ -119,17 +119,19 @@
             Terms terms = reader.terms(field);
             if (terms != null) {
               termsEnum = terms.iterator(null);
+            } else {
+              termsEnum = null;
             }
             lastField = field;
           }
+          dims[dim].docsEnums = new DocsEnum[drillDownTerms[dim].length];
           if (termsEnum == null) {
             nullCount++;
             continue;
           }
-          dims[dim].docsEnums = new DocsEnum[drillDownTerms[dim].length];
           for(int i=0;i<drillDownTerms[dim].length;i++) {
             if (termsEnum.seekExact(drillDownTerms[dim][i].bytes(), false)) {
-              DocsEnum docsEnum = termsEnum.docs(null, null);
+              DocsEnum docsEnum = termsEnum.docs(null, null, 0);
               if (docsEnum != null) {
                 dims[dim].docsEnums[i] = docsEnum;
                 dims[dim].maxCost = Math.max(dims[dim].maxCost, docsEnum.cost());
@@ -138,7 +140,7 @@
           }
         }
 
-        if (nullCount > 1) {
+        if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) {
           return null;
         }
 
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
index 5db28f2..42f5390 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
@@ -80,7 +80,7 @@
     // Position all scorers to their first matching doc:
     baseScorer.nextDoc();
     for(DocsEnumsAndFreq dim : dims) {
-      for(DocsEnum docsEnum : dim.docsEnums) {
+      for (DocsEnum docsEnum : dim.docsEnums) {
         if (docsEnum != null) {
           docsEnum.nextDoc();
         }
@@ -95,7 +95,7 @@
     for(int dim=0;dim<numDims;dim++) {
       docsEnums[dim] = dims[dim].docsEnums;
       sidewaysCollectors[dim] = dims[dim].sidewaysCollector;
-      for(DocsEnum de : dims[dim].docsEnums) {
+      for (DocsEnum de : dims[dim].docsEnums) {
         if (de != null) {
           drillDownCost += de.cost();
         }
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CategoryPath.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CategoryPath.java
index d31556e..7fe3650 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CategoryPath.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CategoryPath.java
@@ -17,6 +17,8 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
+
 import java.util.Arrays;
 import java.util.regex.Pattern;
 
@@ -28,6 +30,18 @@
  */
 public class CategoryPath implements Comparable<CategoryPath> {
 
+  /*
+   * copied from DocumentsWriterPerThread -- if a CategoryPath is resolved to a
+   * drill-down term which is encoded to a larger term than that length, it is
+   * silently dropped! Therefore we limit the number of characters to MAX/4 to
+   * be on the safe side.
+   */
+  /**
+   * The maximum number of characters a {@link CategoryPath} can have. That is,
+   * the length of {@link CategoryPath#toString(char)} must not exceed this limit.
+   */
+  public final static int MAX_CATEGORY_PATH_LENGTH = (BYTE_BLOCK_SIZE - 2) / 4;
+
   /** An empty {@link CategoryPath} */
   public static final CategoryPath EMPTY = new CategoryPath();
 
@@ -63,10 +77,18 @@
   /** Construct from the given path components. */
   public CategoryPath(final String... components) {
     assert components.length > 0 : "use CategoryPath.EMPTY to create an empty path";
+    long len = 0;
     for (String comp : components) {
       if (comp == null || comp.isEmpty()) {
         throw new IllegalArgumentException("empty or null components not allowed: " + Arrays.toString(components));
       }
+      len += comp.length();
+    }
+    len += components.length - 1; // add separators
+    if (len > MAX_CATEGORY_PATH_LENGTH) {
+      throw new IllegalArgumentException("category path exceeds maximum allowed path length: max="
+          + MAX_CATEGORY_PATH_LENGTH + " len=" + len
+          + " path=" + Arrays.toString(components).substring(0, 30) + "...");
     }
     this.components = components;
     length = components.length;
@@ -74,6 +96,12 @@
 
   /** Construct from a given path, separating path components with {@code delimiter}. */
   public CategoryPath(final String pathString, final char delimiter) {
+    if (pathString.length() > MAX_CATEGORY_PATH_LENGTH) {
+      throw new IllegalArgumentException("category path exceeds maximum allowed path length: max="
+              + MAX_CATEGORY_PATH_LENGTH + " len=" + pathString.length()
+              + " path=" + pathString.substring(0, 30) + "...");
+    }
+
     String[] comps = pathString.split(Pattern.quote(Character.toString(delimiter)));
     if (comps.length == 1 && comps[0].isEmpty()) {
       components = null;
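
(Illustrative, not part of the patch.) For the limit introduced above: ByteBlockPool.BYTE_BLOCK_SIZE is 1 << 15 in this era of Lucene, so the constant works out to

    int max = (32768 - 2) / 4;  // MAX_CATEGORY_PATH_LENGTH == 8191 characters

with the division by 4 conservatively bounding how many bytes one char of the drill-down term can occupy once encoded.
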
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/cl2o/CategoryPathUtils.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/cl2o/CategoryPathUtils.java
index 2ceff4c..f1d2e96 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/cl2o/CategoryPathUtils.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/cl2o/CategoryPathUtils.java
@@ -39,14 +39,14 @@
    * {@link #serialize(CategoryPath, CharBlockArray)}.
    */
   public static int hashCodeOfSerialized(CharBlockArray charBlockArray, int offset) {
-    int length = (short) charBlockArray.charAt(offset++);
+    int length = charBlockArray.charAt(offset++);
     if (length == 0) {
       return 0;
     }
     
     int hash = length;
     for (int i = 0; i < length; i++) {
-      int len = (short) charBlockArray.charAt(offset++);
+      int len = charBlockArray.charAt(offset++);
       hash = hash * 31 + charBlockArray.subSequence(offset, offset + len).hashCode();
       offset += len;
     }
@@ -67,7 +67,7 @@
     }
     
     for (int i = 0; i < cp.length; i++) {
-      int len = (short) charBlockArray.charAt(offset++);
+      int len = charBlockArray.charAt(offset++);
       if (len != cp.components[i].length()) {
         return false;
       }
diff --git a/lucene/facet/src/resources/META-INF/services/org.apache.lucene.codecs.Codec b/lucene/facet/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
new file mode 100644
index 0000000..343b4cd
--- /dev/null
+++ b/lucene/facet/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
@@ -0,0 +1,16 @@
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+org.apache.lucene.facet.codecs.facet42.Facet42Codec
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/complements/TestFacetsAccumulatorWithComplement.java b/lucene/facet/src/test/org/apache/lucene/facet/complements/TestFacetsAccumulatorWithComplement.java
index 8cb229a..f3de1ca 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/complements/TestFacetsAccumulatorWithComplement.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/complements/TestFacetsAccumulatorWithComplement.java
@@ -121,8 +121,8 @@
     
     // Results are ready, printing them...
     int i = 0;
-    for (FacetResult facetResult : res) {
-      if (VERBOSE) {
+    if (VERBOSE) {
+      for (FacetResult facetResult : res) {
         System.out.println("Res "+(i++)+": "+facetResult);
       }
     }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/search/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/search/TestDrillSideways.java
index 79b62c7..9772f90 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/search/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/search/TestDrillSideways.java
@@ -28,11 +28,15 @@
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.facet.FacetTestCase;
 import org.apache.lucene.facet.FacetTestUtils;
+import org.apache.lucene.facet.codecs.facet42.Facet42DocValuesFormat;
 import org.apache.lucene.facet.index.FacetFields;
 import org.apache.lucene.facet.params.FacetIndexingParams;
 import org.apache.lucene.facet.params.FacetSearchParams;
@@ -58,6 +62,7 @@
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField.Type;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -65,9 +70,11 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InPlaceMergeSorter;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util._TestUtil;
+import org.junit.Test;
 
 public class TestDrillSideways extends FacetTestCase {
 
@@ -426,6 +433,16 @@
 
     boolean canUseDV = defaultCodecSupportsSortedSet();
 
+    // TestRuleSetupAndRestoreClassEnv can sometimes
+    // randomly pick the non-general Facet42DocValuesFormat:
+    DocValuesFormat dvf = Codec.getDefault().docValuesFormat();
+    if (dvf instanceof PerFieldDocValuesFormat) {
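+      // unwrap the per-field wrapper to see which format actually handles the "$facets" field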
+      dvf = ((PerFieldDocValuesFormat) dvf).getDocValuesFormatForField("$facets");
+    }
+    if (dvf instanceof Facet42DocValuesFormat) {
+      canUseDV = false;
+    }
+
     while (aChance == 0.0) {
       aChance = random().nextDouble();
     }
@@ -1144,5 +1161,33 @@
     }
     return b.toString();
   }
+  
+  @Test
+  public void testEmptyIndex() throws Exception {
+    // LUCENE-5045: make sure DrillSideways works with an empty index
+    Directory dir = newDirectory();
+    Directory taxoDir = newDirectory();
+    writer = new RandomIndexWriter(random(), dir);
+    taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
+    IndexSearcher searcher = newSearcher(writer.getReader());
+    writer.close();
+    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
+    taxoWriter.close();
+
+    // Count "Author"
+    FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(new CategoryPath("Author"), 10));
+
+    DrillSideways ds = new DrillSideways(searcher, taxoReader);
+    DrillDownQuery ddq = new DrillDownQuery(fsp.indexingParams, new MatchAllDocsQuery());
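+    // drill down on a category that cannot exist, since the index is empty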
+    ddq.add(new CategoryPath("Author", "Lisa"));
+    
+    DrillSidewaysResult r = ds.search(null, ddq, 10, fsp); // this used to fail with an IllegalArgumentException
+    assertEquals(0, r.hits.totalHits);
+
+    r = ds.search(ddq, null, null, 10, new Sort(new SortField("foo", Type.INT)), false, false, fsp); // this used to fail with an IllegalArgumentException
+    assertEquals(0, r.hits.totalHits);
+    
+    IOUtils.close(searcher.getIndexReader(), taxoReader, dir, taxoDir);
+  }
 }
 
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCategoryPath.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCategoryPath.java
index 8091c6a..de5d3cb 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCategoryPath.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestCategoryPath.java
@@ -3,6 +3,7 @@
 import java.util.Arrays;
 
 import org.apache.lucene.facet.FacetTestCase;
+import org.apache.lucene.util._TestUtil;
 import org.junit.Test;
 
 /*
@@ -274,5 +275,32 @@
       // expected
     }
   }
+
+  @Test
+  public void testLongPath() throws Exception {
+    String bigComp = null;
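+    // keep drawing random strings of exactly MAX_CATEGORY_PATH_LENGTH chars until one contains no U+001F delimiter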
+    while (true) {
+      int len = CategoryPath.MAX_CATEGORY_PATH_LENGTH;
+      bigComp = _TestUtil.randomSimpleString(random(), len, len);
+      if (bigComp.indexOf('\u001f') != -1) {
+        continue;
+      }
+      break;
+    }
+
+    try {
+      assertNotNull(new CategoryPath("dim", bigComp));
+      fail("long paths should not be allowed; len=" + bigComp.length());
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+
+    try {
+      assertNotNull(new CategoryPath("dim\u001f" + bigComp, '\u001f'));
+      fail("long paths should not be allowed; len=" + bigComp.length());
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
   
 }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
index 9aeb72d..fd515eb 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java
@@ -1,13 +1,19 @@
 package org.apache.lucene.facet.taxonomy.directory;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.facet.FacetTestCase;
+import org.apache.lucene.facet.index.FacetFields;
+import org.apache.lucene.facet.params.FacetIndexingParams;
+import org.apache.lucene.facet.search.DrillDownQuery;
 import org.apache.lucene.facet.taxonomy.CategoryPath;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter.MemoryOrdinalMap;
@@ -20,8 +26,11 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util._TestUtil;
 import org.junit.Test;
 
 /*
@@ -412,5 +421,52 @@
     taxoWriter.close();
     dir.close();
   }
-  
+
+  @Test
+  public void testHugeLabel() throws Exception {
+    Directory indexDir = newDirectory(), taxoDir = newDirectory();
+    IndexWriter indexWriter = new IndexWriter(indexDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, OpenMode.CREATE, new Cl2oTaxonomyWriterCache(2, 1f, 1));
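+    // deliberately tiny cache (capacity 2) so the small categories added below trigger a re-hash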
+    FacetFields facetFields = new FacetFields(taxoWriter);
+    
+    // Add one huge label:
+    String bigs = null;
+    int ordinal = -1;
+    CategoryPath cp = null;
+    while (true) {
+      int len = CategoryPath.MAX_CATEGORY_PATH_LENGTH - 4; // for the dimension and separator
+      bigs = _TestUtil.randomSimpleString(random(), len, len);
+      cp = new CategoryPath("dim", bigs);
+      ordinal = taxoWriter.addCategory(cp);
+      Document doc = new Document();
+      facetFields.addFields(doc, Collections.singletonList(cp));
+      indexWriter.addDocument(doc);
+      break;
+    }
+
+    // Add tiny ones to cause a re-hash
+    for (int i = 0; i < 3; i++) {
+      String s = _TestUtil.randomSimpleString(random(), 1, 10);
+      taxoWriter.addCategory(new CategoryPath("dim", s));
+      Document doc = new Document();
+      facetFields.addFields(doc, Collections.singletonList(new CategoryPath("dim", s)));
+      indexWriter.addDocument(doc);
+    }
+
+    // when components that were too long were still accepted, re-adding this category resulted in a new category being created
+    assertEquals(ordinal, taxoWriter.addCategory(cp));
+    
+    IOUtils.close(indexWriter, taxoWriter);
+    
+    DirectoryReader indexReader = DirectoryReader.open(indexDir);
+    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
+    IndexSearcher searcher = new IndexSearcher(indexReader);
+    DrillDownQuery ddq = new DrillDownQuery(FacetIndexingParams.DEFAULT);
+    ddq.add(cp);
+    assertEquals(1, searcher.search(ddq, 10).totalHits);
+    
+    IOUtils.close(indexReader, taxoReader);
+    
+    IOUtils.close(indexDir, taxoDir);
+  }
 }
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
index 6b37ce9..795a984 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
@@ -78,7 +78,7 @@
     Document doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "random text", Field.Store.NO));
-    doc.add(newStringField("id_1", "1", Field.Store.NO));
+    doc.add(new IntField("id_1", 1, Field.Store.NO));
     doc.add(newStringField("id_2", "1", Field.Store.NO));
     w.addDocument(doc);
 
@@ -86,7 +86,7 @@
     doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "some more random text blob", Field.Store.NO));
-    doc.add(newStringField("id_1", "2", Field.Store.NO));
+    doc.add(new IntField("id_1", 2, Field.Store.NO));
     doc.add(newStringField("id_2", "2", Field.Store.NO));
     w.addDocument(doc);
 
@@ -94,7 +94,7 @@
     doc = new Document();
     addGroupField(doc, groupField, "author1", valueType);
     doc.add(newTextField("content", "some more random textual data", Field.Store.NO));
-    doc.add(newStringField("id_1", "3", Field.Store.NO));
+    doc.add(new IntField("id_1", 3, Field.Store.NO));
     doc.add(newStringField("id_2", "3", Field.Store.NO));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
@@ -103,7 +103,7 @@
     doc = new Document();
     addGroupField(doc, groupField, "author2", valueType);
     doc.add(newTextField("content", "some random text", Field.Store.NO));
-    doc.add(newStringField("id_1", "4", Field.Store.NO));
+    doc.add(new IntField("id_1", 4, Field.Store.NO));
     doc.add(newStringField("id_2", "4", Field.Store.NO));
     w.addDocument(doc);
 
@@ -111,7 +111,7 @@
     doc = new Document();
     addGroupField(doc, groupField, "author3", valueType);
     doc.add(newTextField("content", "some more random text", Field.Store.NO));
-    doc.add(newStringField("id_1", "5", Field.Store.NO));
+    doc.add(new IntField("id_1", 5, Field.Store.NO));
     doc.add(newStringField("id_2", "5", Field.Store.NO));
     w.addDocument(doc);
 
@@ -119,21 +119,21 @@
     doc = new Document();
     addGroupField(doc, groupField, "author3", valueType);
     doc.add(newTextField("content", "random blob", Field.Store.NO));
-    doc.add(newStringField("id_1", "6", Field.Store.NO));
+    doc.add(new IntField("id_1", 6, Field.Store.NO));
     doc.add(newStringField("id_2", "6", Field.Store.NO));
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = new Document();
     doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(newStringField("id_1", "6", Field.Store.NO));
+    doc.add(new IntField("id_1", 6, Field.Store.NO));
     doc.add(newStringField("id_2", "6", Field.Store.NO));
     w.addDocument(doc);
 
     // 7 -- no author field
     doc = new Document();
     doc.add(newTextField("content", "random word stuck in alot of other text", Field.Store.NO));
-    doc.add(newStringField("id_1", "7", Field.Store.NO));
+    doc.add(new IntField("id_1", 7, Field.Store.NO));
     doc.add(newStringField("id_2", "7", Field.Store.NO));
     w.addDocument(doc);
 
@@ -406,6 +406,7 @@
       for (int a : actual) {
         if (e == a) {
           found = true;
+          break;
         }
       }
 
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index 7f435d4..5ddf60f 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -827,12 +827,14 @@
           for(SortField sf : docSort.getSort()) {
             if (sf.getType() == SortField.Type.SCORE) {
               getScores = true;
+              break;
             }
           }
 
           for(SortField sf : groupSort.getSort()) {
             if (sf.getType() == SortField.Type.SCORE) {
               getScores = true;
+              break;
             }
           }
 
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index 850c77a..715efeb 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -369,7 +369,11 @@
    *  identical to what was indexed. */
   protected String[][] loadFieldValues(IndexSearcher searcher, String[] fields, int[] docids, int maxLength) throws IOException {
     String contents[][] = new String[fields.length][docids.length];
-    LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, maxLength);
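+    // resolve the (possibly overridden) per-field separator once, up front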
+    char valueSeparators[] = new char[fields.length];
+    for (int i = 0; i < fields.length; i++) {
+      valueSeparators[i] = getMultiValuedSeparator(fields[i]);
+    }
+    LimitedStoredFieldVisitor visitor = new LimitedStoredFieldVisitor(fields, valueSeparators, maxLength);
     for (int i = 0; i < docids.length; i++) {
       searcher.doc(docids[i], visitor);
       for (int j = 0; j < fields.length; j++) {
@@ -379,6 +383,16 @@
     }
     return contents;
   }
+  
+  /** 
+   * Returns the logical separator between values for multi-valued fields.
+   * The default value is a space character, which means passages can span across values,
+   * but a subclass can override, for example with {@code U+2029 PARAGRAPH SEPARATOR (PS)}
+   * if each value holds a discrete passage for highlighting.
+   */
+  protected char getMultiValuedSeparator(String field) {
+    return ' ';
+  }
     
   private Map<Integer,String> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<AtomicReaderContext> leaves, int maxPassages) throws IOException {  
     Map<Integer,String> highlights = new HashMap<Integer,String>();
@@ -652,12 +666,15 @@
   
   private static class LimitedStoredFieldVisitor extends StoredFieldVisitor {
     private final String fields[];
+    private final char valueSeparators[];
     private final int maxLength;
     private final StringBuilder builders[];
     private int currentField = -1;
     
-    public LimitedStoredFieldVisitor(String fields[], int maxLength) {
+    public LimitedStoredFieldVisitor(String fields[], char valueSeparators[], int maxLength) {
+      assert fields.length == valueSeparators.length;
       this.fields = fields;
+      this.valueSeparators = valueSeparators;
       this.maxLength = maxLength;
       builders = new StringBuilder[fields.length];
       for (int i = 0; i < builders.length; i++) {
@@ -669,8 +686,8 @@
     public void stringField(FieldInfo fieldInfo, String value) throws IOException {
       assert currentField >= 0;
       StringBuilder builder = builders[currentField];
-      if (builder.length() > 0) {
-        builder.append(' '); // for the offset gap, TODO: make this configurable
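+      // append the separator only between values, and only while maxLength has not been reached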
+      if (builder.length() > 0 && builder.length() < maxLength) {
+        builder.append(valueSeparators[currentField]);
       }
       if (builder.length() + value.length() > maxLength) {
         builder.append(value, 0, maxLength - builder.length());
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
index 699950e..f72c59c 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
@@ -123,6 +123,43 @@
     dir.close();
   }
   
+  // simple test with multiple values that make a result longer than maxLength.
+  public void testMaxLengthWithMultivalue() throws Exception {
+    Directory dir = newDirectory();
+    // use simpleanalyzer for more natural tokenization (else "test." is a token)
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    
+    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Document doc = new Document();
+    
+    for(int i = 0; i < 3 ; i++) {
+      Field body = new Field("body", "", offsetsType);
+      body.setStringValue("This is a multivalued field");
+      doc.add(body);
+    }
+    
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    IndexSearcher searcher = newSearcher(ir);
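+    // maxLength of 40 chars: the three values plus separators exceed this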
+    PostingsHighlighter highlighter = new PostingsHighlighter(40);
+    Query query = new TermQuery(new Term("body", "field"));
+    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(1, topDocs.totalHits);
+    String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+    assertEquals(1, snippets.length);
+    assertTrue("Snippet should have maximum 40 characters plus the pre and post tags",
+        snippets[0].length() == (40 + "<b></b>".length()));
+    
+    ir.close();
+    dir.close();
+  }
+  
   public void testMultipleFields() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
@@ -885,4 +922,48 @@
     ir.close();
     dir.close();
   }
+  
+  /** customizing the gap separator to force a sentence break */
+  public void testGapSeparator() throws Exception {
+    Directory dir = newDirectory();
+    // use simpleanalyzer for more natural tokenization (else "test." is a token)
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    
+    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Document doc = new Document();
+    
+    Field body1 = new Field("body", "", offsetsType);
+    body1.setStringValue("This is a multivalued field");
+    doc.add(body1);
+    
+    Field body2 = new Field("body", "", offsetsType);
+    body2.setStringValue("This is something different");
+    doc.add(body2);
+    
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    PostingsHighlighter highlighter = new PostingsHighlighter() {
+      @Override
+      protected char getMultiValuedSeparator(String field) {
+        assert field.equals("body");
+        return '\u2029';
+      }
+    };
+    Query query = new TermQuery(new Term("body", "field"));
+    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(1, topDocs.totalHits);
+    String snippets[] = highlighter.highlight("body", query, searcher, topDocs);
+    assertEquals(1, snippets.length);
+    assertEquals("This is a multivalued <b>field</b>\u2029", snippets[0]);
+    
+    ir.close();
+    dir.close();
+  }
 }
diff --git a/lucene/join/build.xml b/lucene/join/build.xml
index 1cb7133..80d384c 100644
--- a/lucene/join/build.xml
+++ b/lucene/join/build.xml
@@ -1,4 +1,22 @@
 <?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied.  See the License for the
+   specific language governing permissions and limitations
+   under the License.
+-->
 <project name="join" default="default">
   <description>
     Index-time and Query-time joins for normalized content
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
index f4f2eb1..81d5ddd 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
@@ -38,6 +38,7 @@
 class TermsQuery extends MultiTermQuery {
 
   private final BytesRefHash terms;
+  private final int[] ords;
   private final Query fromQuery; // Used for equals() only
 
   /**
@@ -48,6 +49,7 @@
     super(field);
     this.fromQuery = fromQuery;
     this.terms = terms;
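+    // sort the terms once at construction time rather than in each SeekingTermSetTermsEnum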
+    ords = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
   }
 
   @Override
@@ -56,7 +58,7 @@
       return TermsEnum.EMPTY;
     }
 
-    return new SeekingTermSetTermsEnum(terms.iterator(null), this.terms);
+    return new SeekingTermSetTermsEnum(terms.iterator(null), this.terms, ords);
   }
 
   @Override
@@ -104,12 +106,12 @@
     private BytesRef seekTerm;
     private int upto = 0;
 
-    SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms) {
+    SeekingTermSetTermsEnum(TermsEnum tenum, BytesRefHash terms, int[] ords) {
       super(tenum);
       this.terms = terms;
-
+      this.ords = ords;
+      comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
       lastElement = terms.size() - 1;
-      ords = terms.sort(comparator = tenum.getComparator());
       lastTerm = terms.get(ords[lastElement], new BytesRef());
       seekTerm = terms.get(ords[upto], spare);
     }
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 5dfd746..147b4a5 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -459,7 +459,7 @@
     for(int parentDocID=0;parentDocID<numParentDocs;parentDocID++) {
       Document parentDoc = new Document();
       Document parentJoinDoc = new Document();
-      Field id = newStringField("parentID", ""+parentDocID, Field.Store.YES);
+      Field id = new IntField("parentID", parentDocID, Field.Store.YES);
       parentDoc.add(id);
       parentJoinDoc.add(id);
       parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
@@ -472,8 +472,8 @@
       }
 
       if (doDeletes) {
-        parentDoc.add(newStringField("blockID", ""+parentDocID, Field.Store.NO));
-        parentJoinDoc.add(newStringField("blockID", ""+parentDocID, Field.Store.NO));
+        parentDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
+        parentJoinDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
       }
 
       final List<Document> joinDocs = new ArrayList<>();
@@ -497,7 +497,7 @@
         Document joinChildDoc = new Document();
         joinDocs.add(joinChildDoc);
 
-        Field childID = newStringField("childID", ""+childDocID, Field.Store.YES);
+        Field childID = new IntField("childID", childDocID, Field.Store.YES);
         childDoc.add(childID);
         joinChildDoc.add(childID);
 
@@ -522,7 +522,7 @@
         }
 
         if (doDeletes) {
-          joinChildDoc.add(newStringField("blockID", ""+parentDocID, Field.Store.NO));
+          joinChildDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
         }
 
         w.addDocument(childDoc);
@@ -541,8 +541,10 @@
       if (VERBOSE) {
         System.out.println("DELETE parentID=" + deleteID);
       }
-      w.deleteDocuments(new Term("blockID", ""+deleteID));
-      joinW.deleteDocuments(new Term("blockID", ""+deleteID));
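+      // blockID is now an IntField, so the delete term must be prefix-coded the same way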
+      BytesRef term = new BytesRef();
+      NumericUtils.intToPrefixCodedBytes(deleteID, 0, term);
+      w.deleteDocuments(new Term("blockID", term));
+      joinW.deleteDocuments(new Term("blockID", term));
     }
 
     final IndexReader r = w.getReader();
diff --git a/lucene/licenses/morfologik-fsa-1.5.5.jar.sha1 b/lucene/licenses/morfologik-fsa-1.5.5.jar.sha1
deleted file mode 100644
index 3a8935a..0000000
--- a/lucene/licenses/morfologik-fsa-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7965a39db114f7c404b71d38bc7f0e6a332c4e73
diff --git a/lucene/licenses/morfologik-fsa-1.6.0.jar.sha1 b/lucene/licenses/morfologik-fsa-1.6.0.jar.sha1
new file mode 100644
index 0000000..8041cb4
--- /dev/null
+++ b/lucene/licenses/morfologik-fsa-1.6.0.jar.sha1
@@ -0,0 +1 @@
+397a99307020797e6790f2faf8cf865983b52559
diff --git a/lucene/licenses/morfologik-fsa-LICENSE-BSD.txt b/lucene/licenses/morfologik-fsa-LICENSE-BSD.txt
index f97fb7d..4daba47 100644
--- a/lucene/licenses/morfologik-fsa-LICENSE-BSD.txt
+++ b/lucene/licenses/morfologik-fsa-LICENSE-BSD.txt
@@ -1,6 +1,6 @@
 
 Copyright (c) 2006 Dawid Weiss
-Copyright (c) 2007-2012 Dawid Weiss, Marcin Miłkowski
+Copyright (c) 2007-2013 Dawid Weiss, Marcin Miłkowski
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, 
@@ -26,4 +26,4 @@
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 
 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/lucene/licenses/morfologik-polish-1.5.5.jar.sha1 b/lucene/licenses/morfologik-polish-1.5.5.jar.sha1
deleted file mode 100644
index 10c14c0..0000000
--- a/lucene/licenses/morfologik-polish-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4a3a9746cab8b2c99c33d2ceeda2ece3f8d8ef2
diff --git a/lucene/licenses/morfologik-polish-1.6.0.jar.sha1 b/lucene/licenses/morfologik-polish-1.6.0.jar.sha1
new file mode 100644
index 0000000..b44ead1
--- /dev/null
+++ b/lucene/licenses/morfologik-polish-1.6.0.jar.sha1
@@ -0,0 +1 @@
+ca0663530971b54420fc1cea00a6338f68428232
diff --git a/lucene/licenses/morfologik-polish-LICENSE-BSD.txt b/lucene/licenses/morfologik-polish-LICENSE-BSD.txt
index 04ffd07..660f633 100644
--- a/lucene/licenses/morfologik-polish-LICENSE-BSD.txt
+++ b/lucene/licenses/morfologik-polish-LICENSE-BSD.txt
@@ -1,62 +1,26 @@
 BSD-licensed dictionary of Polish (Morfologik)
 
-Copyright (c) 2012, Marcin Miłkowski
+Morfologik Polish dictionary.
+Version: 2.0 PoliMorf
+Copyright (c) 2013, Marcin Miłkowski
 All rights reserved.
 
-Redistribution and  use in  source and binary  forms, with  or without
-modification, are permitted provided that the following conditions are
-met:
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
 
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
+1. Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution. 
 
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the
-   distribution.
-
-THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
-OR  IMPLIED WARRANTIES,  INCLUDING, BUT  NOT LIMITED  TO,  THE IMPLIED
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT  SHALL COPYRIGHT  HOLDERS OR  CONTRIBUTORS BE
-LIABLE FOR  ANY DIRECT,  INDIRECT, INCIDENTAL, SPECIAL,  EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES  (INCLUDING, BUT NOT LIMITED  TO, PROCUREMENT OF
-SUBSTITUTE  GOODS OR  SERVICES;  LOSS  OF USE,  DATA,  OR PROFITS;  OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF LIABILITY,
-WHETHER IN  CONTRACT, STRICT LIABILITY, OR  TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
---
-
-BSD-licensed dictionary of Polish (SGJP)
-http://sgjp.pl/morfeusz/
-
-Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, 
-	    	 Marcin Woliński, Robert Wołosz
-
-All rights reserved.
-
-Redistribution and  use in  source and binary  forms, with  or without
-modification, are permitted provided that the following conditions are
-met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the
-   distribution.
-
-THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
-OR  IMPLIED WARRANTIES,  INCLUDING, BUT  NOT LIMITED  TO,  THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT  SHALL COPYRIGHT  HOLDERS OR  CONTRIBUTORS BE
-LIABLE FOR  ANY DIRECT,  INDIRECT, INCIDENTAL, SPECIAL,  EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES  (INCLUDING, BUT NOT LIMITED  TO, PROCUREMENT OF
-SUBSTITUTE  GOODS OR  SERVICES;  LOSS  OF USE,  DATA,  OR PROFITS;  OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF LIABILITY,
-WHETHER IN  CONTRACT, STRICT LIABILITY, OR  TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lucene/licenses/morfologik-polish-NOTICE.txt b/lucene/licenses/morfologik-polish-NOTICE.txt
index a8a3aa1..45d4cba 100644
--- a/lucene/licenses/morfologik-polish-NOTICE.txt
+++ b/lucene/licenses/morfologik-polish-NOTICE.txt
@@ -1,6 +1,3 @@
 
-This product includes data from BSD-licensed dictionary of Polish (Morfologik)
-(http://morfologik.blogspot.com/)
-
-This product includes data from BSD-licensed dictionary of Polish (SGJP)
-(http://sgjp.pl/morfeusz/)
+This product includes data from BSD-licensed dictionary of Polish (Morfologik, PoliMorf)
+(http://morfologik.blogspot.com/)
\ No newline at end of file
diff --git a/lucene/licenses/morfologik-stemming-1.5.5.jar.sha1 b/lucene/licenses/morfologik-stemming-1.5.5.jar.sha1
deleted file mode 100644
index c9824e4..0000000
--- a/lucene/licenses/morfologik-stemming-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e5dc913adeba3b89539cd5f82e5b88d136a1d85b
diff --git a/lucene/licenses/morfologik-stemming-1.6.0.jar.sha1 b/lucene/licenses/morfologik-stemming-1.6.0.jar.sha1
new file mode 100644
index 0000000..4ba5467
--- /dev/null
+++ b/lucene/licenses/morfologik-stemming-1.6.0.jar.sha1
@@ -0,0 +1 @@
+8a284571bea2cdd305cd86fbac9bab6deef31c7f
diff --git a/lucene/licenses/morfologik-stemming-LICENSE-BSD.txt b/lucene/licenses/morfologik-stemming-LICENSE-BSD.txt
index f97fb7d..4daba47 100644
--- a/lucene/licenses/morfologik-stemming-LICENSE-BSD.txt
+++ b/lucene/licenses/morfologik-stemming-LICENSE-BSD.txt
@@ -1,6 +1,6 @@
 
 Copyright (c) 2006 Dawid Weiss
-Copyright (c) 2007-2012 Dawid Weiss, Marcin Miłkowski
+Copyright (c) 2007-2013 Dawid Weiss, Marcin Miłkowski
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, 
@@ -26,4 +26,4 @@
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 
 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java b/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
index 06f3cab..5ebff53 100644
--- a/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
+++ b/lucene/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
@@ -158,7 +158,7 @@
    * @see #baselineTf
    */
   @Override
-  public float tf(int freq) {
+  public float tf(float freq) {
     return baselineTf(freq);
   }
   
diff --git a/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java b/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java
index 8db654a..99b2f3f 100644
--- a/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java
+++ b/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java
@@ -38,6 +38,15 @@
  * more than one output, as this requires pushing all
  * multi-output values to a final state.
  *
+ * <p>NOTE: the only way to create multiple outputs is to
+ * add the same input to the FST multiple times in a row.  This is
+ * how the FST maps a single input to multiple outputs (e.g. you
+ * cannot pass a List&lt;Object&gt; to {@link Builder#add}).  If
+ * your outputs are longs, and you need at most 2, then use
+ * {@link UpToTwoPositiveIntOutputs} instead since it stores
+ * the outputs more compactly (by stealing a bit from each
+ * long value).
+ *
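+ * <p>For example, adding the input "foo" with output 17 and then
+ * adding "foo" again with output 42 makes the FST map "foo" to
+ * the list [17, 42].
+ *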
  * <p>NOTE: this cannot wrap itself (ie you cannot make an
  * FST with List&lt;List&lt;Object&gt;&gt; outputs using this).
  *
diff --git a/lucene/misc/src/java/org/apache/lucene/util/fst/UpToTwoPositiveIntOutputs.java b/lucene/misc/src/java/org/apache/lucene/util/fst/UpToTwoPositiveIntOutputs.java
index 04cbbf1..78e2715 100644
--- a/lucene/misc/src/java/org/apache/lucene/util/fst/UpToTwoPositiveIntOutputs.java
+++ b/lucene/misc/src/java/org/apache/lucene/util/fst/UpToTwoPositiveIntOutputs.java
@@ -17,21 +17,6 @@
  * limitations under the License.
  */
 
-/**
- * An FST {@link Outputs} implementation where each output
- * is one or two non-negative long values.  If it's a
- * single output, Long is returned; else, TwoLongs.  Order
- * is preserved in the TwoLongs case, ie .first is the first
- * input/output added to Builder, and .second is the
- * second.  You cannot store 0 output with this (that's
- * reserved to mean "no output")!
- *
- * NOTE: the resulting FST is not guaranteed to be minimal!
- * See {@link Builder}.
- *
- * @lucene.experimental
- */
-
 import java.io.IOException;
 
 import org.apache.lucene.store.DataInput;
@@ -46,11 +31,21 @@
  * second.  You cannot store 0 output with this (that's
  * reserved to mean "no output")!
  *
- * NOTE: the resulting FST is not guaranteed to be minimal!
+ * <p>NOTE: the only way to create a TwoLongs output is to
+ * add the same input to the FST twice in a row.  This is
+ * how the FST maps a single input to two outputs (e.g. you
+ * cannot pass a TwoLongs to {@link Builder#add}).  If you
+ * need more than two then use {@link ListOfOutputs}, but if
+ * you only have at most 2 then this implementation will
+ * require fewer bytes as it steals one bit from each long
+ * value.
+ *
+ * <p>NOTE: the resulting FST is not guaranteed to be minimal!
  * See {@link Builder}.
  *
  * @lucene.experimental
  */
 public final class UpToTwoPositiveIntOutputs extends Outputs<Object> {
 
   /** Holds two long outputs. */
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index 1ed3bf1..080f2fe 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -41,7 +41,7 @@
       ((MockDirectoryWrapper)fsDir).setAssertNoUnrefencedFilesOnClose(false);
     }
 
-    LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
+    MergePolicy mergePolicy = new LogByteSizeMergePolicy();
     mergePolicy.setNoCFSRatio(1.0);
     mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
     IndexWriter iw = new IndexWriter(
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
index 1597c703..38456a5 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
@@ -98,13 +98,8 @@
     }
     
     @Override
-    public ExactSimScorer exactSimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      return in.exactSimScorer(weight, context);
-    }
-    
-    @Override
-    public SloppySimScorer sloppySimScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
-      return in.sloppySimScorer(weight, context);
+    public SimScorer simScorer(SimWeight weight, AtomicReaderContext context) throws IOException {
+      return in.simScorer(weight, context);
     }
     
   }
diff --git a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
index f238ba7..f5e35f2 100644
--- a/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
+++ b/lucene/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
@@ -246,7 +246,7 @@
   
     SweetSpotSimilarity ss = new SweetSpotSimilarity() {
         @Override
-        public float tf(int freq) {
+        public float tf(float freq) {
           return hyperbolicTf(freq);
         }
       };
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java
deleted file mode 100644
index da66797..0000000
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ByteFieldSource.java
+++ /dev/null
@@ -1,121 +0,0 @@
-package org.apache.lucene.queries.function.valuesource;
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.search.FieldCache;
-
-/**
- * Obtains int field values from the {@link org.apache.lucene.search.FieldCache}
- * using <code>getInts()</code>
- * and makes those values available as other numeric types, casting as needed. *
- *
- *
- */
-
-public class ByteFieldSource extends FieldCacheSource {
-
-  private final FieldCache.ByteParser parser;
-
-  public ByteFieldSource(String field) {
-    this(field, null);
-  }
-
-  public ByteFieldSource(String field, FieldCache.ByteParser parser) {
-    super(field);
-    this.parser = parser;
-  }
-
-  @Override
-  public String description() {
-    return "byte(" + field + ')';
-  }
-
-  @Override
-  public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Bytes arr = cache.getBytes(readerContext.reader(), field, parser, false);
-    
-    return new FunctionValues() {
-      @Override
-      public byte byteVal(int doc) {
-        return arr.get(doc);
-      }
-
-      @Override
-      public short shortVal(int doc) {
-        return (short) arr.get(doc);
-      }
-
-      @Override
-      public float floatVal(int doc) {
-        return (float) arr.get(doc);
-      }
-
-      @Override
-      public int intVal(int doc) {
-        return (int) arr.get(doc);
-      }
-
-      @Override
-      public long longVal(int doc) {
-        return (long) arr.get(doc);
-      }
-
-      @Override
-      public double doubleVal(int doc) {
-        return (double) arr.get(doc);
-      }
-
-      @Override
-      public String strVal(int doc) {
-        return Byte.toString(arr.get(doc));
-      }
-
-      @Override
-      public String toString(int doc) {
-        return description() + '=' + byteVal(doc);
-      }
-
-      @Override
-      public Object objectVal(int doc) {
-        return arr.get(doc);  // TODO: valid?
-      }
-
-    };
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o.getClass() != ByteFieldSource.class) return false;
-    ByteFieldSource
-            other = (ByteFieldSource) o;
-    return super.equals(other)
-      && (this.parser == null ? other.parser == null :
-          this.parser.getClass() == other.parser.getClass());
-  }
-
-  @Override
-  public int hashCode() {
-    int h = parser == null ? Byte.class.hashCode() : parser.getClass().hashCode();
-    h += super.hashCode();
-    return h;
-  }
-}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java
index 9f998fd..d1e1f4f 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java
@@ -27,23 +27,18 @@
   public MaxFloatFunction(ValueSource[] sources) {
     super(sources);
   }
-
-  @Override  
+  
+  @Override
   protected String name() {
     return "max";
   }
 
   @Override
   protected float func(int doc, FunctionValues[] valsArr) {
-    boolean first = true;
-    float val = 0.0f;
+    if (valsArr.length == 0) return 0.0f;
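+    // start from -Infinity so the first value always replaces it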
+    float val = Float.NEGATIVE_INFINITY;
     for (FunctionValues vals : valsArr) {
-      if (first) {
-        first = false;
-        val = vals.floatVal(doc);
-      } else {
-        val = Math.max(vals.floatVal(doc),val);
-      }
+      val = Math.max(vals.floatVal(doc), val);
     }
     return val;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java
index abae980..4cf3587 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java
@@ -35,15 +35,10 @@
 
   @Override
   protected float func(int doc, FunctionValues[] valsArr) {
-    boolean first = true;
-    float val = 0.0f;
+    if (valsArr.length == 0) return 0.0f;
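+    // start from +Infinity so the first value always replaces it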
+    float val = Float.POSITIVE_INFINITY;
     for (FunctionValues vals : valsArr) {
-      if (first) {
-        first = false;
-        val = vals.floatVal(doc);
-      } else {
-        val = Math.min(vals.floatVal(doc),val);
-      }
+      val = Math.min(vals.floatVal(doc), val);
     }
     return val;
   }
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
index b71928c..c6b86ae 100755
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
@@ -29,7 +29,7 @@
 import java.util.Map;
 
 /** 
- * Function that returns {@link TFIDFSimilarity#decodeNormValue(byte)}
+ * Function that returns {@link TFIDFSimilarity#decodeNormValue(long)}
  * for every document.
  * <p>
  * Note that the configured Similarity for the field must be
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java
deleted file mode 100644
index 2909ebb..0000000
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ShortFieldSource.java
+++ /dev/null
@@ -1,114 +0,0 @@
-package org.apache.lucene.queries.function.valuesource;
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.queries.function.FunctionValues;
-import org.apache.lucene.search.FieldCache;
-
-
-/**
- * Obtains short field values from the {@link org.apache.lucene.search.FieldCache}
- * using <code>getShorts()</code>
- * and makes those values available as other numeric types, casting as needed.
- **/
-public class ShortFieldSource extends FieldCacheSource {
-
-  final FieldCache.ShortParser parser;
-
-  public ShortFieldSource(String field) {
-    this(field, null);
-  }
-
-  public ShortFieldSource(String field, FieldCache.ShortParser parser) {
-    super(field);
-    this.parser = parser;
-  }
-
-  @Override
-  public String description() {
-    return "short(" + field + ')';
-  }
-
-  @Override
-  public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.Shorts arr = cache.getShorts(readerContext.reader(), field, parser, false);
-    
-    return new FunctionValues() {
-      @Override
-      public byte byteVal(int doc) {
-        return (byte) arr.get(doc);
-      }
-
-      @Override
-      public short shortVal(int doc) {
-        return arr.get(doc);
-      }
-
-      @Override
-      public float floatVal(int doc) {
-        return (float) arr.get(doc);
-      }
-
-      @Override
-      public int intVal(int doc) {
-        return (int) arr.get(doc);
-      }
-
-      @Override
-      public long longVal(int doc) {
-        return (long) arr.get(doc);
-      }
-
-      @Override
-      public double doubleVal(int doc) {
-        return (double) arr.get(doc);
-      }
-
-      @Override
-      public String strVal(int doc) {
-        return Short.toString(arr.get(doc));
-      }
-
-      @Override
-      public String toString(int doc) {
-        return description() + '=' + shortVal(doc);
-      }
-
-    };
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o.getClass() != ShortFieldSource.class) return false;
-    ShortFieldSource
-            other = (ShortFieldSource) o;
-    return super.equals(other)
-      && (this.parser == null ? other.parser == null :
-          this.parser.getClass() == other.parser.getClass());
-  }
-
-  @Override
-  public int hashCode() {
-    int h = parser == null ? Short.class.hashCode() : parser.getClass().hashCode();
-    h += super.hashCode();
-    return h;
-  }
-}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index 0fc4fc9..12554be 100755
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -29,7 +29,7 @@
 import java.util.Map;
 
 /** 
- * Function that returns {@link TFIDFSimilarity#tf(int)}
+ * Function that returns {@link TFIDFSimilarity#tf(float)}
  * for every document.
  * <p>
  * Note that the configured Similarity for the field must be
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
index 41a903e..d976d80 100755
--- a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
@@ -53,26 +53,6 @@
   }
 
   /**
-   * Test that CustomScoreQuery of Type.BYTE returns the expected scores.
-   */
-  @Test
-  public void testCustomScoreByte() throws Exception {
-    // INT field values are small enough to be parsed as byte
-    doTestCustomScore(BYTE_VALUESOURCE, 1.0);
-    doTestCustomScore(BYTE_VALUESOURCE, 2.0);
-  }
-
-  /**
-   * Test that CustomScoreQuery of Type.SHORT returns the expected scores.
-   */
-  @Test
-  public void testCustomScoreShort() throws Exception {
-    // INT field values are small enough to be parsed as short
-    doTestCustomScore(SHORT_VALUESOURCE, 1.0);
-    doTestCustomScore(SHORT_VALUESOURCE, 3.0);
-  }
-
-  /**
    * Test that CustomScoreQuery of Type.INT returns the expected scores.
    */
   @Test
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
index 416967f..4f3c012 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
@@ -1,18 +1,25 @@
 package org.apache.lucene.queries.function;
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
-import org.apache.lucene.queries.function.valuesource.ShortFieldSource;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 import org.junit.AfterClass;
@@ -53,10 +60,21 @@
   protected static final String INT_FIELD = "iii";
   protected static final String FLOAT_FIELD = "fff";
 
-  protected ValueSource BYTE_VALUESOURCE = new ByteFieldSource(INT_FIELD);
-  protected ValueSource SHORT_VALUESOURCE = new ShortFieldSource(INT_FIELD);
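+  // parses the int-encoded terms of the INT field but exposes each value as a float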
+  private static final FieldCache.FloatParser CUSTOM_FLOAT_PARSER = new FieldCache.FloatParser() {
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return FieldCache.NUMERIC_UTILS_INT_PARSER.termsEnum(terms);
+    }
+    
+    @Override
+    public float parseFloat(BytesRef term) {
+      return (float) FieldCache.NUMERIC_UTILS_INT_PARSER.parseInt(term);
+    }
+  };
+
   protected ValueSource INT_VALUESOURCE = new IntFieldSource(INT_FIELD);
-  protected ValueSource INT_AS_FLOAT_VALUESOURCE = new FloatFieldSource(INT_FIELD);
+  protected ValueSource INT_AS_FLOAT_VALUESOURCE = new FloatFieldSource(INT_FIELD, CUSTOM_FLOAT_PARSER);
   protected ValueSource FLOAT_VALUESOURCE = new FloatFieldSource(FLOAT_FIELD);
 
   private static final String DOC_TEXT_LINES[] = {
@@ -140,10 +158,10 @@
     f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
     d.add(f);
 
-    f = newField(INT_FIELD, "" + scoreAndID, customType); // for function scoring
+    f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
     d.add(f);
 
-    f = newField(FLOAT_FIELD, scoreAndID + ".000", customType); // for function scoring
+    f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
     d.add(f);
 
     iw.addDocument(d);
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
index 41d3c50..7aab17e 100755
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java
@@ -19,12 +19,6 @@
 
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.queries.function.FunctionQuery;
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
-import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
-import org.apache.lucene.queries.function.valuesource.IntFieldSource;
-import org.apache.lucene.queries.function.valuesource.ShortFieldSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.ScoreDoc;
@@ -50,20 +44,6 @@
     createIndex(true);
   }
 
-  /** Test that FieldScoreQuery of Type.BYTE returns docs in expected order. */
-  @Test
-  public void testRankByte () throws Exception {
-    // INT field values are small enough to be parsed as byte
-    doTestRank(BYTE_VALUESOURCE);
-  }
-
-  /** Test that FieldScoreQuery of Type.SHORT returns docs in expected order. */
-  @Test
-  public void testRankShort () throws Exception {
-    // INT field values are small enough to be parsed as short
-    doTestRank(SHORT_VALUESOURCE);
-  }
-
   /** Test that FieldScoreQuery of Type.INT returns docs in expected order. */
   @Test
   public void testRankInt () throws Exception {
@@ -99,20 +79,6 @@
     r.close();
   }
 
-  /** Test that FieldScoreQuery of Type.BYTE returns the expected scores. */
-  @Test
-  public void testExactScoreByte () throws Exception {
-    // INT field values are small enough to be parsed as byte
-    doTestExactScore(BYTE_VALUESOURCE);
-  }
-
-  /** Test that FieldScoreQuery of Type.SHORT returns the expected scores. */
-  @Test
-  public void testExactScoreShort () throws  Exception {
-    // INT field values are small enough to be parsed as short
-    doTestExactScore(SHORT_VALUESOURCE);
-  }
-
   /** Test that FieldScoreQuery of Type.INT returns the expected scores. */
   @Test
   public void testExactScoreInt () throws  Exception {
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
index 572188f..760aab3 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionQuerySort.java
@@ -18,9 +18,10 @@
  */
 
 import java.io.IOException;
+
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.IntField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -46,13 +47,13 @@
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
 
     Document doc = new Document();
-    Field field = new StringField("value", "", Field.Store.YES);
+    Field field = new IntField("value", 0, Field.Store.YES);
     doc.add(field);
 
     // Save docs unsorted (decreasing value n, n-1, ...)
     final int NUM_VALS = 5;
     for (int val = NUM_VALS; val > 0; val--) {
-      field.setStringValue(Integer.toString(val));
+      field.setIntValue(val);
       writer.addDocument(doc);
     }
 
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
index cd856d3..e781b22 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
@@ -22,13 +22,16 @@
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.queries.function.valuesource.ConstValueSource;
 import org.apache.lucene.queries.function.valuesource.DivFloatFunction;
@@ -54,7 +57,6 @@
 import org.apache.lucene.queries.function.valuesource.RangeMapFloatFunction;
 import org.apache.lucene.queries.function.valuesource.ReciprocalFloatFunction;
 import org.apache.lucene.queries.function.valuesource.ScaleFloatFunction;
-import org.apache.lucene.queries.function.valuesource.ShortFieldSource;
 import org.apache.lucene.queries.function.valuesource.SumFloatFunction;
 import org.apache.lucene.queries.function.valuesource.SumTotalTermFreqValueSource;
 import org.apache.lucene.queries.function.valuesource.TFValueSource;
@@ -85,9 +87,9 @@
   static IndexSearcher searcher;
   
   static final List<String[]> documents = Arrays.asList(new String[][] {
-      /*             id,  byte, double, float, int,    long, short, string, text */ 
-      new String[] { "0",  "5", "3.63", "5.2", "35", "4343", "945", "test", "this is a test test test" },
-      new String[] { "1", "12", "5.65", "9.3", "54", "1954", "123", "bar",  "second test" },
+      /*             id,  double, float, int,  long,   string, text */ 
+      new String[] { "0", "3.63", "5.2", "35", "4343", "test", "this is a test test test" },
+      new String[] { "1", "5.65", "9.3", "54", "1954", "bar",  "second test" },
   });
   
   @BeforeClass
@@ -99,18 +101,14 @@
     Document document = new Document();
     Field idField = new StringField("id", "", Field.Store.NO);
     document.add(idField);
-    Field byteField = new StringField("byte", "", Field.Store.NO);
-    document.add(byteField);
-    Field doubleField = new StringField("double", "", Field.Store.NO);
+    Field doubleField = new DoubleField("double", 0d, Field.Store.NO);
     document.add(doubleField);
-    Field floatField = new StringField("float", "", Field.Store.NO);
+    Field floatField = new FloatField("float", 0f, Field.Store.NO);
     document.add(floatField);
-    Field intField = new StringField("int", "", Field.Store.NO);
+    Field intField = new IntField("int", 0, Field.Store.NO);
     document.add(intField);
-    Field longField = new StringField("long", "", Field.Store.NO);
+    Field longField = new LongField("long", 0L, Field.Store.NO);
     document.add(longField);
-    Field shortField = new StringField("short", "", Field.Store.NO);
-    document.add(shortField);
     Field stringField = new StringField("string", "", Field.Store.NO);
     document.add(stringField);
     Field textField = new TextField("text", "", Field.Store.NO);
@@ -118,14 +116,12 @@
     
     for (String [] doc : documents) {
       idField.setStringValue(doc[0]);
-      byteField.setStringValue(doc[1]);
-      doubleField.setStringValue(doc[2]);
-      floatField.setStringValue(doc[3]);
-      intField.setStringValue(doc[4]);
-      longField.setStringValue(doc[5]);
-      shortField.setStringValue(doc[6]);
-      stringField.setStringValue(doc[7]);
-      textField.setStringValue(doc[8]);
+      doubleField.setDoubleValue(Double.valueOf(doc[1]));
+      floatField.setFloatValue(Float.valueOf(doc[2]));
+      intField.setIntValue(Integer.valueOf(doc[3]));
+      longField.setLongValue(Long.valueOf(doc[4]));
+      stringField.setStringValue(doc[5]);
+      textField.setStringValue(doc[6]);
       iw.addDocument(document);
     }
     
@@ -143,11 +139,6 @@
     dir = null;
   }
   
-  public void testByte() throws Exception {
-    assertHits(new FunctionQuery(new ByteFieldSource("byte")),
-        new float[] { 5f, 12f });
-  }
-  
   public void testConst() throws Exception {
     assertHits(new FunctionQuery(new ConstValueSource(0.3f)),
         new float[] { 0.3f, 0.3f });
@@ -298,11 +289,6 @@
        new float[] { 0.0f, 1.0f });
   }
   
-  public void testShort() throws Exception {
-    assertHits(new FunctionQuery(new ShortFieldSource("short")),
-        new float[] { 945f, 123f });
-  }
-  
   public void testSumFloat() throws Exception {
     assertHits(new FunctionQuery(new SumFloatFunction(new ValueSource[] {
         new ConstValueSource(1f), new ConstValueSource(2f)})),
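
A minimal sketch of the field-reuse pattern the hunks above keep while switching to typed numeric fields (hypothetical class and values; Lucene 4.x API assumed): one Document and its Field instances are created once and mutated per row, which avoids per-document allocations.

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.document.LongField;
    import org.apache.lucene.index.IndexWriter;

    public class FieldReuseSketch {
      // the writer copies current field values on addDocument,
      // so mutating the fields between adds is safe
      static void indexRows(IndexWriter writer, int numRows) throws IOException {
        Document doc = new Document();
        IntField intField = new IntField("int", 0, Field.Store.NO);
        LongField longField = new LongField("long", 0L, Field.Store.NO);
        doc.add(intField);
        doc.add(longField);
        for (int i = 0; i < numRows; i++) {
          intField.setIntValue(i);
          longField.setLongValue(i * 1000L);
          writer.addDocument(doc);
        }
      }
    }
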
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
index 25f2ecd..7fc916e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
@@ -150,7 +150,7 @@
         builder = getQueryBuilder(clazz);
 
         if (builder == null) {
-          Class<?>[] classes = node.getClass().getInterfaces();
+          Class<?>[] classes = clazz.getInterfaces();
 
           for (Class<?> actualClass : classes) {
             builder = getQueryBuilder(actualClass);
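
The one-line change above is the LUCENE-4890 fix: the interface lookup must use clazz, the variable walking up the superclass chain, rather than the node's concrete class. A simplified sketch of the corrected lookup (hypothetical helper, not the actual QueryTreeBuilder code):

    import java.util.Map;

    public class BuilderLookupSketch {
      // walk the node's class hierarchy and, at each level, also consult the
      // interfaces declared by that level, not just those of the concrete class
      static Object findBuilder(Map<Class<?>, Object> builders, Class<?> nodeClass) {
        for (Class<?> clazz = nodeClass; clazz != null; clazz = clazz.getSuperclass()) {
          Object builder = builders.get(clazz);
          if (builder == null) {
            // LUCENE-4890: use clazz.getInterfaces(), not nodeClass.getInterfaces(),
            // so an interface implemented only by a superclass is still found
            for (Class<?> iface : clazz.getInterfaces()) {
              builder = builders.get(iface);
              if (builder != null) {
                break;
              }
            }
          }
          if (builder != null) {
            return builder;
          }
        }
        return null;
      }
    }
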
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java
index b9a7fe6..fd363f5 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java
@@ -195,7 +195,7 @@
       if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')'
           || c == ':' || c == '^' || c == '[' || c == ']' || c == '\"'
           || c == '{' || c == '}' || c == '~' || c == '*' || c == '?'
-          || c == '|' || c == '&') {
+          || c == '|' || c == '&' || c == '/') {
         sb.append('\\');
       }
       sb.append(c);
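
A standalone sketch mirroring the escaping loop above (hypothetical helper, not the Lucene API); '/' joins the escaped set, presumably because a slash can introduce regular-expression syntax in query strings.

    public class QueryEscapeSketch {
      // prepend a backslash to every query-syntax special character, including '/'
      static String escape(String s) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < s.length(); i++) {
          char c = s.charAt(i);
          if ("\\+-!():^[]\"{}~*?|&/".indexOf(c) != -1) {
            sb.append('\\');
          }
          sb.append(c);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(escape("date 2013/07/03")); // date 2013\/07\/03
      }
    }
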
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java
index 2fe5e91..e837a53 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/core/builders/TestQueryTreeBuilder.java
@@ -22,6 +22,8 @@
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
+import org.apache.lucene.queryparser.flexible.core.nodes.QueryNodeImpl;
+import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax;
 import org.apache.lucene.queryparser.flexible.core.util.UnescapedCharSequence;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.Test;
@@ -35,6 +37,28 @@
     Object result = qtb.build(new FieldQueryNode(new UnescapedCharSequence("field"), "foo", 0, 0));
     Assert.assertEquals("OK", result);
     
+    // LUCENE-4890
+    qtb = new QueryTreeBuilder();
+    qtb.setBuilder(DummyQueryNodeInterface.class, new DummyBuilder());
+    result = qtb.build(new DummyQueryNode());
+    Assert.assertEquals("OK", result);
+  }
+  
+  private interface DummyQueryNodeInterface extends QueryNode {
+  }
+  
+  private abstract static class AbstractDummyQueryNode extends QueryNodeImpl implements DummyQueryNodeInterface {
+  }
+  
+  private static class DummyQueryNode extends AbstractDummyQueryNode {
+
+    @Override
+    public CharSequence toQueryString(EscapeQuerySyntax escapeSyntaxParser) {
+      return "DummyQueryNode";
+    }
+    
   }
   
   private static class DummyBuilder implements QueryBuilder {
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
index 1fb9152..ef20967 100755
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
@@ -19,6 +19,7 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map.Entry;
@@ -154,7 +155,7 @@
     try {
       replicator.obtainFile(res.id, res.sourceFiles.keySet().iterator().next(), "madeUpFile");
       fail("should have failed obtaining an unrecognized file");
-    } catch (FileNotFoundException e) {
+    } catch (FileNotFoundException | NoSuchFileException e) {
       // expected
     }
   }
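
A standalone sketch of the multi-catch pattern this hunk introduces (hypothetical helper): depending on whether legacy java.io or NIO.2 opened the file, a missing file surfaces as FileNotFoundException or NoSuchFileException, and both extend IOException.

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Paths;

    public class MissingFileSketch {
      static boolean isMissing(String path) {
        try {
          Files.newInputStream(Paths.get(path)).close();
          return false;
        } catch (FileNotFoundException | NoSuchFileException e) {
          // either exception type signals a missing file
          return true;
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
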
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
index 2de42ad..f621408 100755
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.net.SocketException;
 import java.util.Random;
 
 import org.apache.http.conn.ClientConnectionManager;
@@ -125,12 +124,14 @@
     return server;
   }
   
-  /**
-   * Returns a {@link Server}'s port. This method assumes that no
-   * {@link Connector}s were added to the Server besides the default one.
-   */
-  public static int serverPort(Server httpServer) {
-    return httpServer.getConnectors()[0].getLocalPort();
+  /** Returns a {@link Server}'s port. */
+  public static int serverPort(Server server) {
+    return server.getConnectors()[0].getLocalPort();
+  }
+  
+  /** Returns a {@link Server}'s host. */
+  public static String serverHost(Server server) {
+    return server.getConnectors()[0].getHost();
   }
   
   /**
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
index 7059cc6..46b5942 100755
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
@@ -50,6 +50,7 @@
   private DirectoryReader reader;
   private Server server;
   private int port;
+  private String host;
   private Directory serverIndexDir, handlerIndexDir;
   
   private void startServer() throws Exception {
@@ -59,6 +60,7 @@
     replicationHandler.addServletWithMapping(servlet, ReplicationService.REPLICATION_CONTEXT + "/*");
     server = newHttpServer(replicationHandler);
     port = serverPort(server);
+    host = serverHost(server);
   }
   
   @Before
@@ -103,7 +105,7 @@
   
   @Test
   public void testBasic() throws Exception {
-    Replicator replicator = new HttpReplicator("localhost", port, ReplicationService.REPLICATION_CONTEXT + "/s1", 
+    Replicator replicator = new HttpReplicator(host, port, ReplicationService.REPLICATION_CONTEXT + "/s1", 
         getClientConnectionManager());
     ReplicationClient client = new ReplicationClient(replicator, new IndexReplicationHandler(handlerIndexDir, null), 
         new PerSessionDirectoryFactory(clientWorkDir));
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowFuzzyTermsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowFuzzyTermsEnum.java
index de8539e..f63c1a1 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowFuzzyTermsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/SlowFuzzyTermsEnum.java
@@ -31,9 +31,12 @@
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.UnicodeUtil;
 
-/** Classic fuzzy TermsEnum for enumerating all terms that are similar
+/** Potentially slow fuzzy TermsEnum for enumerating all terms that are similar
  * to the specified filter term.
- *
+ * <p> If the minSimilarity or maxEdits is greater than the Automaton's
+ * allowable range, this backs off to the classic (brute force)
+ * fuzzy terms enum method by calling FuzzyTermsEnum's getAutomatonEnum.
+ * </p>
  * <p>Term enumerations are always ordered by
  * {@link #getComparator}.  Each term in the enumeration is
  * greater than all that precede it.</p>
@@ -103,18 +106,43 @@
     private final IntsRef utf32 = new IntsRef(20);
     
     /**
-     * The termCompare method in FuzzyTermEnum uses Levenshtein distance to 
+     * <p>The termCompare method in FuzzyTermEnum uses Levenshtein distance to 
      * calculate the distance between the given term and the comparing term. 
+     * </p>
+     * <p>If the minSimilarity is &gt;= 1.0, this uses the maxEdits as the comparison.
+     * Otherwise, this method uses the following logic to calculate similarity.
+     * <pre>
+     *   similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
+     * </pre>
+     * where distance is the Levenshtein distance for the two words.
+     * </p>
      */
     @Override
     protected final AcceptStatus accept(BytesRef term) {
       if (StringHelper.startsWith(term, prefixBytesRef)) {
         UnicodeUtil.UTF8toUTF32(term, utf32);
-        final float similarity = similarity(utf32.ints, realPrefixLength, utf32.length - realPrefixLength);
-        if (similarity > minSimilarity) {
+        final int distance = calcDistance(utf32.ints, realPrefixLength, utf32.length - realPrefixLength);
+
+        // Integer.MIN_VALUE is the sentinel indicating the fail-fast Levenshtein
+        // computation aborted early
+        if (distance == Integer.MIN_VALUE) {
+          return AcceptStatus.NO;
+        }
+        // no need to calculate similarity if raw is true and distance > maxEdits
+        if (raw && distance > maxEdits) {
+          return AcceptStatus.NO;
+        }
+        final float similarity = calcSimilarity(distance, (utf32.length - realPrefixLength), text.length);
+
+        // if raw is true, then distance must be <= maxEdits by now,
+        // given the previous if statement
+        if (raw || similarity > minSimilarity) {
           boostAtt.setBoost((similarity - minSimilarity) * scale_factor);
           return AcceptStatus.YES;
-        } else return AcceptStatus.NO;
+        } else {
+          return AcceptStatus.NO;
+        }
       } else {
         return AcceptStatus.END;
       }
@@ -125,52 +153,34 @@
      ******************************/
     
     /**
-     * <p>Similarity returns a number that is 1.0f or less (including negative numbers)
-     * based on how similar the Term is compared to a target term.  It returns
-     * exactly 0.0f when
-     * <pre>
-     *    editDistance &gt; maximumEditDistance</pre>
-     * Otherwise it returns:
-     * <pre>
-     *    1 - (editDistance / length)</pre>
-     * where length is the length of the shortest term (text or target) including a
-     * prefix that are identical and editDistance is the Levenshtein distance for
-     * the two words.</p>
-     *
+     * <p>calcDistance returns the Levenshtein distance between the query term
+     * and the target term.</p>
+     * 
      * <p>Embedded within this algorithm is a fail-fast Levenshtein distance
      * algorithm.  The fail-fast algorithm differs from the standard Levenshtein
      * distance algorithm in that it is aborted if it is discovered that the
      * minimum distance between the words is greater than some threshold.
-     *
-     * <p>To calculate the maximum distance threshold we use the following formula:
-     * <pre>
-     *     (1 - minimumSimilarity) * length</pre>
-     * where length is the shortest term including any prefix that is not part of the
-     * similarity comparison.  This formula was derived by solving for what maximum value
-     * of distance returns false for the following statements:
-     * <pre>
-     *   similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
-     *   return (similarity > minimumSimilarity);</pre>
-     * where distance is the Levenshtein distance for the two words.
-     * </p>
+     * </p>
      * <p>Levenshtein distance (also known as edit distance) is a measure of similarity
      * between two strings where the distance is measured as the number of character
      * deletions, insertions or substitutions required to transform one string to
      * the other string.
      * @param target the target word or phrase
-     * @return the similarity,  0.0 or less indicates that it matches less than the required
-     * threshold and 1.0 indicates that the text and target are identical
+     * @param offset the offset at which to start the comparison
+     * @param length the length of what's left of the string to compare
+     * @return the number of edits or Integer.MIN_VALUE if the edit distance is
+     * greater than maxDistance.
      */
-    private final float similarity(final int[] target, int offset, int length) {
+    private final int calcDistance(final int[] target, int offset, int length) {
       final int m = length;
       final int n = text.length;
       if (n == 0)  {
         //we don't have anything to compare.  That means if we just add
         //the letters for m we get the new word
-        return realPrefixLength == 0 ? 0.0f : 1.0f - ((float) m / realPrefixLength);
+        return m;
       }
       if (m == 0) {
-        return realPrefixLength == 0 ? 0.0f : 1.0f - ((float) n / realPrefixLength);
+        return n;
       }
       
       final int maxDistance = calculateMaxDistance(m);
@@ -183,7 +193,7 @@
         //which is 8-3 or more precisely Math.abs(3-8).
         //if our maximum edit distance is 4, then we can discard this word
         //without looking at it.
-        return Float.NEGATIVE_INFINITY;
+        return Integer.MIN_VALUE;
       }
       
       // init matrix d
@@ -214,7 +224,7 @@
         if (j > maxDistance && bestPossibleEditDistance > maxDistance) {  //equal is okay, but not greater
           //the closest the target can be to the text is just too far away.
           //this target is leaving the party early.
-          return Float.NEGATIVE_INFINITY;
+          return Integer.MIN_VALUE;
         }
 
         // copy current distance counts to 'previous row' distance counts: swap p and d
@@ -226,12 +236,17 @@
       // our last action in the above loop was to switch d and p, so p now
       // actually has the most recent cost counts
 
+      return p[n];
+    }
+    
+    private float calcSimilarity(int edits, int m, int n) {
       // this will return less than 0.0 when the edit distance is
       // greater than the number of characters in the shorter word.
       // but this was the formula that was previously used in FuzzyTermEnum,
       // so it has not been changed (even though minimumSimilarity must be
       // greater than 0.0)
-      return 1.0f - ((float)p[n] / (float) (realPrefixLength + Math.min(n, m)));
+
+      return 1.0f - ((float)edits / (float) (realPrefixLength + Math.min(n, m)));
     }
     
     /**
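
The extracted calcSimilarity keeps FuzzyTermEnum's historical formula, similarity = 1 - edits / (prefixLength + min(textLen, targetLen)). A standalone sketch with worked values (hypothetical class; numbers chosen for illustration):

    public class SimilaritySketch {
      static float similarity(int edits, int prefixLength, int textLen, int targetLen) {
        return 1.0f - ((float) edits / (prefixLength + Math.min(textLen, targetLen)));
      }

      public static void main(String[] args) {
        // "segment" vs "segments": 1 edit, no shared prefix counted, min(7, 8) = 7
        System.out.println(similarity(1, 0, 7, 8)); // 1 - 1/7 = 0.857...
        // edits beyond the shorter length go negative, matching the old behavior
        System.out.println(similarity(8, 0, 7, 8)); // 1 - 8/7 = -0.142...
      }
    }
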
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
index a4a125d..c823807 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
@@ -43,6 +43,9 @@
 public class TestSlowFuzzyQuery extends LuceneTestCase {
 
   public void testFuzziness() throws Exception {
+    //every test with SlowFuzzyQuery.defaultMinSimilarity
+    //is exercising the Automaton, not the brute force linear method
+    
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
     addDoc("aaaaa", writer);
@@ -194,6 +197,30 @@
     directory.close();
   }
 
+  public void testFuzzinessLong2() throws Exception {
+    // LUCENE-5033
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
+    addDoc("abcdef", writer);
+    addDoc("segment", writer);
+
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = newSearcher(reader);
+    writer.close();
+
+    SlowFuzzyQuery query;
+
+    // minSimilarity >= 1 is interpreted as a raw maximum edit distance
+    query = new SlowFuzzyQuery(new Term("field", "abcxxxx"), 3f, 0);
+    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(0, hits.length);
+
+    query = new SlowFuzzyQuery(new Term("field", "abcxxxx"), 4f, 0);
+    hits = searcher.search(query, null, 1000).scoreDocs;
+    assertEquals(1, hits.length);
+    reader.close();
+    directory.close();
+  }
+
   public void testFuzzinessLong() throws Exception {
     Directory directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
@@ -385,7 +412,6 @@
   
   public void testGiga() throws Exception {
 
-    MockAnalyzer analyzer = new MockAnalyzer(random());
     Directory index = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), index);
 
@@ -440,25 +466,21 @@
     assertEquals(1, hits.length);
     assertEquals("foobar", searcher.doc(hits[0].doc).get("field"));
     
-    // TODO: cannot really be supported given the legacy scoring
-    // system which scores negative, if the distance > min term len,
-    // so such matches were always impossible with lucene 3.x, etc
-    //
-    //q = new SlowFuzzyQuery(new Term("field", "t"), 3);
-    //hits = searcher.search(q, 10).scoreDocs;
-    //assertEquals(1, hits.length);
-    //assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    q = new SlowFuzzyQuery(new Term("field", "t"), 3);
+    hits = searcher.search(q, 10).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
     
-    // q = new SlowFuzzyQuery(new Term("field", "a"), 4f, 0, 50);
-    // hits = searcher.search(q, 10).scoreDocs;
-    // assertEquals(1, hits.length);
-    // assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    q = new SlowFuzzyQuery(new Term("field", "a"), 4f, 0, 50);
+    hits = searcher.search(q, 10).scoreDocs;
+    assertEquals(1, hits.length);
+    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
     
-    // q = new SlowFuzzyQuery(new Term("field", "a"), 6f, 0, 50);
-    // hits = searcher.search(q, 10).scoreDocs;
-    // assertEquals(2, hits.length);
-    // assertEquals("test", searcher.doc(hits[0].doc).get("field"));
-    // assertEquals("foobar", searcher.doc(hits[1].doc).get("field"));
+    q = new SlowFuzzyQuery(new Term("field", "a"), 6f, 0, 50);
+    hits = searcher.search(q, 10).scoreDocs;
+    assertEquals(2, hits.length);
+    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    assertEquals("foobar", searcher.doc(hits[1].doc).get("field"));
     
     reader.close();
     index.close();
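
For the testFuzzinessLong2 hunk above: with minSimilarity >= 1, SlowFuzzyQuery treats the value as a raw edit-distance bound, and the Levenshtein distance between "abcxxxx" and "abcdef" is 4 (three substitutions plus one deletion), which is why 3f finds nothing and 4f matches. A self-contained check (hypothetical class):

    public class EditDistanceSketch {
      // classic two-row dynamic-programming Levenshtein distance
      static int levenshtein(String a, String b) {
        int[] prev = new int[b.length() + 1], cur = new int[b.length() + 1];
        for (int j = 0; j <= b.length(); j++) prev[j] = j;
        for (int i = 1; i <= a.length(); i++) {
          cur[0] = i;
          for (int j = 1; j <= b.length(); j++) {
            int sub = prev[j - 1] + (a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1);
            cur[j] = Math.min(sub, Math.min(prev[j] + 1, cur[j - 1] + 1));
          }
          int[] tmp = prev; prev = cur; cur = tmp;
        }
        return prev[b.length()];
      }

      public static void main(String[] args) {
        System.out.println(levenshtein("abcxxxx", "abcdef")); // 4
      }
    }
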
diff --git a/lucene/spatial/build.xml b/lucene/spatial/build.xml
index 37505e6..463ae43 100644
--- a/lucene/spatial/build.xml
+++ b/lucene/spatial/build.xml
@@ -1,4 +1,22 @@
 <?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied.  See the License for the
+   specific language governing permissions and limitations
+   under the License.
+-->
 <project name="spatial" default="default">
   <description>
     Geospatial search
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
index 74f0a68..bd0e954 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
@@ -41,8 +41,35 @@
  */
 public class ContainsPrefixTreeFilter extends AbstractPrefixTreeFilter {
 
-  public ContainsPrefixTreeFilter(Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel) {
+  /*
+  Future optimizations:
+    Instead of seekExact, use seekCeil with some leap-frogging, like Intersects does.
+  */
+
+  /**
+   * If the spatial data for a document comprises multiple overlapping or adjacent parts,
+   * the document might fail to match a CONTAINS query even when the union of those parts
+   * contains the query shape but no single part does individually.  Set this to false to
+   * increase performance if you don't care about that circumstance (such as when your
+   * indexed data can never have such conditions).  See LUCENE-5062.
+   */
+  protected final boolean multiOverlappingIndexedShapes;
+
+  public ContainsPrefixTreeFilter(Shape queryShape, String fieldName, SpatialPrefixTree grid, int detailLevel, boolean multiOverlappingIndexedShapes) {
     super(queryShape, fieldName, grid, detailLevel);
+    this.multiOverlappingIndexedShapes = multiOverlappingIndexedShapes;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!super.equals(o))
+      return false;
+    return multiOverlappingIndexedShapes == ((ContainsPrefixTreeFilter)o).multiOverlappingIndexedShapes;
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode() + (multiOverlappingIndexedShapes ? 1 : 0);
   }
 
   @Override
@@ -65,18 +92,25 @@
       if (termsEnum == null)//signals all done
         return null;
 
-      //Leaf docs match all query shape
+      // Leaf docs match all query shape
       SmallDocSet leafDocs = getLeafDocs(cell, acceptContains);
 
-      // Get the AND of all child results
+      // Get the AND of all child results (into combinedSubResults)
       SmallDocSet combinedSubResults = null;
-      Collection<Cell> subCells = cell.getSubCells(queryShape);
+      //   Optimization: use null subCellsFilter when we know cell is within the query shape.
+      Shape subCellsFilter = queryShape;
+      if (cell.getLevel() != 0 && (cell.getShapeRel() == null || cell.getShapeRel() == SpatialRelation.WITHIN)) {
+        subCellsFilter = null;
+        assert cell.getShape().relate(queryShape) == SpatialRelation.WITHIN;
+      }
+      Collection<Cell> subCells = cell.getSubCells(subCellsFilter);
       for (Cell subCell : subCells) {
         if (!seekExact(subCell))
           combinedSubResults = null;
         else if (subCell.getLevel() == detailLevel)
           combinedSubResults = getDocs(subCell, acceptContains);
-        else if (subCell.getShapeRel() == SpatialRelation.WITHIN)
+        else if (!multiOverlappingIndexedShapes &&
+            subCell.getShapeRel() == SpatialRelation.WITHIN)
           combinedSubResults = getLeafDocs(subCell, acceptContains);
         else
           combinedSubResults = visit(subCell, acceptContains); //recursion
@@ -90,7 +124,7 @@
       if (combinedSubResults != null) {
         if (leafDocs == null)
           return combinedSubResults;
-        return leafDocs.union(combinedSubResults);
+        return leafDocs.union(combinedSubResults);//union is 'or'
       }
       return leafDocs;
     }
@@ -109,8 +143,12 @@
       return collectDocs(acceptContains);
     }
 
+    private Cell lastLeaf = null; // just for assertion
+
     private SmallDocSet getLeafDocs(Cell leafCell, Bits acceptContains) throws IOException {
       assert new BytesRef(leafCell.getTokenBytes()).equals(termBytes);
+      assert !leafCell.equals(lastLeaf); // don't call for the same leaf again
+      lastLeaf = leafCell;
 
       BytesRef nextTerm = termsEnum.next();
       if (nextTerm == null) {
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
index 6c5364a..b2db296 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java
@@ -38,6 +38,13 @@
 
   private int prefixGridScanLevel;
 
+  /** True if only indexed points shall be supported.  See
+   *  {@link IntersectsPrefixTreeFilter#hasIndexedLeaves}. */
+  protected boolean pointsOnly = false;
+
+  /** See {@link ContainsPrefixTreeFilter#multiOverlappingIndexedShapes}. */
+  protected boolean multiOverlappingIndexedShapes = true;
+
   public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, String fieldName) {
     super(grid, fieldName,
         true);//simplify indexed cells
@@ -69,18 +76,17 @@
 
     Shape shape = args.getShape();
     int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
-    final boolean hasIndexedLeaves = true;
 
-    if (op == SpatialOperation.Intersects) {
+    if (pointsOnly || op == SpatialOperation.Intersects) {
       return new IntersectsPrefixTreeFilter(
-          shape, getFieldName(), grid, detailLevel, prefixGridScanLevel,
-          hasIndexedLeaves);
+          shape, getFieldName(), grid, detailLevel, prefixGridScanLevel, !pointsOnly);
     } else if (op == SpatialOperation.IsWithin) {
       return new WithinPrefixTreeFilter(
           shape, getFieldName(), grid, detailLevel, prefixGridScanLevel,
           -1);//-1 flag is slower but ensures correct results
     } else if (op == SpatialOperation.Contains) {
-      return new ContainsPrefixTreeFilter(shape, getFieldName(), grid, detailLevel);
+      return new ContainsPrefixTreeFilter(shape, getFieldName(), grid, detailLevel,
+          multiOverlappingIndexedShapes);
     }
     throw new UnsupportedSpatialOperation(op);
   }
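
A configuration sketch for the two new flags (hypothetical subclass; at this point they are protected fields, so subclassing is one way to set them):

    import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
    import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;

    public class SingleShapeRPTStrategy extends RecursivePrefixTreeStrategy {
      public SingleShapeRPTStrategy(SpatialPrefixTree grid, String fieldName) {
        super(grid, fieldName);
        // safe only if no document indexes multiple overlapping/adjacent
        // shapes (LUCENE-5062)
        this.multiOverlappingIndexedShapes = false;
        // set to true when every indexed shape is a point, enabling the
        // faster Intersects-based code path for all operations
        this.pointsOnly = false;
      }
    }
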
diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java
index 8c6a317..cbec0e2 100644
--- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java
+++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java
@@ -110,6 +110,18 @@
     doTest(SpatialOperation.IsDisjointTo);
   }
 
+  /** See LUCENE-5062, {@link ContainsPrefixTreeFilter#multiOverlappingIndexedShapes}. */
+  @Test
+  public void testContainsPairOverlap() throws IOException {
+    mySetup(3);
+    adoc("0", new ShapePair(ctx.makeRectangle(0, 33, -128, 128), ctx.makeRectangle(33, 128, -128, 128), true));
+    commit();
+    Query query = strategy.makeQuery(new SpatialArgs(SpatialOperation.Contains,
+        ctx.makeRectangle(0, 128, -16, 128)));
+    SearchResults searchResults = executeQuery(query, 1);
+    assertEquals(1, searchResults.numFound);
+  }
+
   @Test
   public void testWithinDisjointParts() throws IOException {
     mySetup(7);
@@ -184,10 +196,10 @@
       Shape indexedShape;
       Shape indexedShapeGS; //(grid-snapped)
       int R = random().nextInt(12);
-      if (R == 0) {//1 in 10
+      if (R == 0) {//1 in 12
         indexedShape = null; //no shape for this doc
         indexedShapeGS = null;
-      } else if (R % 4 == 0) {//3 in 12
+      } else if (R % 3 == 0) {//3 in 12 (the 4 multiples of 3 in 0..11, minus the R == 0 case above)
         //comprised of more than one shape
         Rectangle shape1 = randomRectangle();
         Rectangle shape2 = randomRectangle();
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
index 66c25d7..6e797ad 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
@@ -512,7 +512,7 @@
 
       reader = new Sort.ByteSequencesReader(tempSorted);
      
-      PairOutputs<Long,BytesRef> outputs = new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(true), ByteSequenceOutputs.getSingleton());
+      PairOutputs<Long,BytesRef> outputs = new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
       Builder<Pair<Long,BytesRef>> builder = new Builder<Pair<Long,BytesRef>>(FST.INPUT_TYPE.BYTE1, outputs);
 
       // Build FST:
@@ -634,7 +634,7 @@
   public boolean load(InputStream input) throws IOException {
     DataInput dataIn = new InputStreamDataInput(input);
     try {
-      this.fst = new FST<Pair<Long,BytesRef>>(dataIn, new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(true), ByteSequenceOutputs.getSingleton()));
+      this.fst = new FST<Pair<Long,BytesRef>>(dataIn, new PairOutputs<Long,BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
       maxAnalyzedPathsForOneInput = dataIn.readVInt();
       hasPayloads = dataIn.readByte() == 1;
     } finally {
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
index 7b8d782..f634bee 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
@@ -101,7 +101,7 @@
     TermFreqIterator iter = new WFSTTermFreqIteratorWrapper(iterator);
     IntsRef scratchInts = new IntsRef();
     BytesRef previous = null;
-    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
+    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
     Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
     while ((scratch = iter.next()) != null) {
       long cost = iter.weight();
@@ -136,7 +136,7 @@
   @Override
   public boolean load(InputStream input) throws IOException {
     try {
-      this.fst = new FST<Long>(new InputStreamDataInput(input), PositiveIntOutputs.getSingleton(true));
+      this.fst = new FST<Long>(new InputStreamDataInput(input), PositiveIntOutputs.getSingleton());
     } finally {
       IOUtils.close(input);
     }
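
A minimal FST sketch against the updated call (hypothetical class; Lucene 4.x FST API assumed), since PositiveIntOutputs.getSingleton() no longer takes the boolean argument shown removed above:

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.IntsRef;
    import org.apache.lucene.util.fst.Builder;
    import org.apache.lucene.util.fst.FST;
    import org.apache.lucene.util.fst.PositiveIntOutputs;
    import org.apache.lucene.util.fst.Util;

    public class FstSketch {
      public static void main(String[] args) throws Exception {
        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
        IntsRef scratch = new IntsRef();
        // inputs must be added in sorted order
        builder.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L);
        builder.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
        FST<Long> fst = builder.finish();
        System.out.println(Util.get(fst, new BytesRef("cat"))); // 5
      }
    }
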
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
index 1554469..803c5d9 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggesterTest.java
@@ -1164,4 +1164,33 @@
     assertEquals("[isla de muerta/8, i love lucy/7]", suggester.lookup("i", false, 3).toString());
     assertEquals("[i love lucy/7]", suggester.lookup("i ", false, 3).toString());
   }
+
+  public void testTooManyExpansions() throws Exception {
+
+    final Analyzer a = new Analyzer() {
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+          Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
+        
+          return new TokenStreamComponents(tokenizer) {
+            @Override
+            public TokenStream getTokenStream() {
+              Token a = new Token("a", 0, 1);
+              a.setPositionIncrement(1);
+              Token b = new Token("b", 0, 1);
+              b.setPositionIncrement(0);
+              return new CannedTokenStream(new Token[] {a, b});
+            }
+         
+            @Override
+            protected void setReader(final Reader reader) throws IOException {
+            }
+          };
+        }
+      };
+
+    AnalyzingSuggester suggester = new AnalyzingSuggester(a, a, 0, 256, 1);
+    suggester.build(new TermFreqArrayIterator(new TermFreq[] {new TermFreq("a", 1)}));
+    assertEquals("[a/1]", suggester.lookup("a", false, 1).toString());
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java
index 252ba5d..24f41ab 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastCompressingCodec.java
@@ -1,5 +1,11 @@
 package org.apache.lucene.codecs.compressing;
 
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
+import org.apache.lucene.codecs.lucene42.Lucene42NormsFormat;
+import org.apache.lucene.util.packed.PackedInts;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,4 +38,13 @@
     this(1 << 14, false);
   }
 
+  @Override
+  public NormsFormat normsFormat() {
+    return new Lucene42NormsFormat(PackedInts.FAST);
+  }
+
+  @Override
+  public DocValuesFormat docValuesFormat() {
+    return new Lucene42DocValuesFormat(PackedInts.FAST);
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java
index 568a649..7c6ba48 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/FastDecompressionCompressingCodec.java
@@ -1,5 +1,11 @@
 package org.apache.lucene.codecs.compressing;
 
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
+import org.apache.lucene.codecs.lucene42.Lucene42NormsFormat;
+import org.apache.lucene.util.packed.PackedInts;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,4 +38,13 @@
     this(1 << 14, false);
   }
 
+  @Override
+  public NormsFormat normsFormat() {
+    return new Lucene42NormsFormat(PackedInts.DEFAULT);
+  }
+
+  @Override
+  public DocValuesFormat docValuesFormat() {
+    return new Lucene42DocValuesFormat(PackedInts.DEFAULT);
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java
index fb235f9..2f1fc29 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/HighCompressionCompressingCodec.java
@@ -1,5 +1,9 @@
 package org.apache.lucene.codecs.compressing;
 
+import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.lucene42.Lucene42NormsFormat;
+import org.apache.lucene.util.packed.PackedInts;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -32,4 +36,8 @@
     this(1 << 14, false);
   }
 
+  @Override
+  public NormsFormat normsFormat() {
+    return new Lucene42NormsFormat(PackedInts.COMPACT);
+  }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index 6884db7..4c42f38 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -94,7 +94,7 @@
     Directory dir = newDirectory();
     Random rand = random();
     RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
-    //w.w.setUseCompoundFile(false);
+    //w.w.setNoCFSRatio(0.0);
     final int docCount = atLeast(200);
     final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
index 2114343..a774159 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java
@@ -272,7 +272,7 @@
   public static SegmentInfoPerCommit writeDoc(Random random, Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException {
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( /* LuceneTestCase.newIndexWriterConfig(random, */ 
         TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity == null ? IndexSearcher.getDefaultSimilarity() : similarity));
-    //writer.setUseCompoundFile(false);
+    //writer.setNoCFSRatio(0.0);
     writer.addDocument(doc);
     writer.commit();
     SegmentInfoPerCommit info = writer.newestSegment();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index a507a68..3fea86c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -25,17 +25,12 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField; 
-import org.apache.lucene.document.SortedDocValuesField; 
-import org.apache.lucene.index.FieldInfo.DocValuesType;
 import org.apache.lucene.index.IndexWriter; // javadoc
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NullInfoStream;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
@@ -55,23 +50,22 @@
   private boolean getReaderCalled;
   private final Codec codec; // sugar
 
-  // Randomly calls Thread.yield so we mixup thread scheduling
-  private static final class MockIndexWriter extends IndexWriter {
-
-    private final Random r;
-
-    public MockIndexWriter(Random r, Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-      // TODO: this should be solved in a different way; Random should not be shared (!).
-      this.r = new Random(r.nextLong());
-    }
-
-    @Override
-    boolean testPoint(String name) {
-      if (r.nextInt(4) == 2)
-        Thread.yield();
-      return true;
-    }
+
+  public static IndexWriter mockIndexWriter(Directory dir, IndexWriterConfig conf, Random r) throws IOException {
+    // Randomly calls Thread.yield so we mix up thread scheduling
+    final Random random = new Random(r.nextLong());
+    return mockIndexWriter(dir, conf, new TestPoint() {
+      @Override
+      public void apply(String message) {
+        if (random.nextInt(4) == 2)
+          Thread.yield();
+      }
+    });
+  }
+  
+  public static IndexWriter mockIndexWriter(Directory dir, IndexWriterConfig conf, TestPoint testPoint) throws IOException {
+    conf.setInfoStream(new TestPointInfoStream(conf.getInfoStream(), testPoint));
+    return new IndexWriter(dir, conf);
   }
 
   /** create a RandomIndexWriter with a random config: Uses TEST_VERSION_CURRENT and MockAnalyzer */
@@ -93,7 +87,7 @@
   public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
     // TODO: this should be solved in a different way; Random should not be shared (!).
     this.r = new Random(r.nextLong());
-    w = new MockIndexWriter(r, dir, c);
+    w = mockIndexWriter(dir, c, r);
     flushAt = _TestUtil.nextInt(r, 10, 1000);
     codec = w.getConfig().getCodec();
     if (LuceneTestCase.VERBOSE) {
@@ -345,4 +339,42 @@
   public void forceMerge(int maxSegmentCount) throws IOException {
     w.forceMerge(maxSegmentCount);
   }
+  
+  private static final class TestPointInfoStream extends InfoStream {
+    private final InfoStream delegate;
+    private final TestPoint testPoint;
+    
+    public TestPointInfoStream(InfoStream delegate, TestPoint testPoint) {
+      this.delegate = delegate == null ? new NullInfoStream() : delegate;
+      this.testPoint = testPoint;
+    }
+
+    @Override
+    public void close() throws IOException {
+      delegate.close();
+    }
+
+    @Override
+    public void message(String component, String message) {
+      if ("TP".equals(component)) {
+        testPoint.apply(message);
+      }
+      if (delegate.isEnabled(component)) {
+        delegate.message(component, message);
+      }
+    }
+    
+    @Override
+    public boolean isEnabled(String component) {
+      return "TP".equals(component) || delegate.isEnabled(component);
+    }
+  }
+  
+  /**
+   * Simple interface whose {@link #apply} method is invoked for each <tt>TP</tt>
+   * {@link InfoStream} component message. See also
+   * {@link RandomIndexWriter#mockIndexWriter(Directory, IndexWriterConfig, TestPoint)}.
+   */
+  public interface TestPoint {
+    void apply(String message);
+  }
 }
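
A usage sketch for the new hook (hypothetical helper; the mockIndexWriter signature and TestPoint interface are the ones added above): instead of subclassing IndexWriter, a caller installs a TestPoint that observes every "TP"-component message.

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.store.Directory;

    public class TestPointSketch {
      static IndexWriter writerWithCounter(Directory dir, IndexWriterConfig conf,
                                           final AtomicInteger testPointsSeen) throws IOException {
        return RandomIndexWriter.mockIndexWriter(dir, conf, new RandomIndexWriter.TestPoint() {
          @Override
          public void apply(String message) {
            // e.g. count how often IndexWriter hits a test point
            testPointsSeen.incrementAndGet();
          }
        });
      }
    }
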
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
index aeb8039..c193923 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -20,6 +20,7 @@
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -385,7 +386,7 @@
       if (randomState.nextBoolean()) {
         throw new IOException("a random IOException (" + name + ")");
       } else {
-        throw new FileNotFoundException("a random IOException (" + name + ")");
+        throw randomState.nextBoolean() ? new FileNotFoundException("a random IOException (" + name + ")") : new NoSuchFileException("a random IOException (" + name + ")");
       }
     }
   }
@@ -544,7 +545,7 @@
       maybeThrowDeterministicException();
     }
     if (!delegate.fileExists(name)) {
-      throw new FileNotFoundException(name + " in dir=" + delegate);
+      throw randomState.nextBoolean() ? new FileNotFoundException(name + " in dir=" + delegate) : new NoSuchFileException(name + " in dir=" + delegate);
     }
 
     // cannot open a file for input if it's still open for
@@ -920,7 +921,7 @@
       throws IOException {
     maybeYield();
     if (!delegate.fileExists(name)) {
-      throw new FileNotFoundException(name);
+      throw randomState.nextBoolean() ? new FileNotFoundException(name) : new NoSuchFileException(name);
     }
     // cannot open a file for input if it's still open for
     // output, except for segments.gen and segments_N
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
index 93f1f24..063dec3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java
@@ -36,6 +36,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
@@ -158,6 +159,7 @@
     final Field titleDV;
     final Field body;
     final Field id;
+    final Field idNum;
     final Field date;
 
     public DocState(boolean useDocValues) {
@@ -180,6 +182,9 @@
       id = new StringField("docid", "", Field.Store.YES);
       doc.add(id);
 
+      idNum = new IntField("docid_int", 0, Field.Store.NO);
+      doc.add(idNum);
+
       date = new StringField("date", "", Field.Store.YES);
       doc.add(date);
 
@@ -233,7 +238,9 @@
     }
     docState.titleTokenized.setStringValue(title);
     docState.date.setStringValue(line.substring(1+spot, spot2));
-    docState.id.setStringValue(Integer.toString(id.getAndIncrement()));
+    final int i = id.getAndIncrement();
+    docState.id.setStringValue(Integer.toString(i));
+    docState.idNum.setIntValue(i);
     return docState.doc;
   }
 }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index 2ce2823..3791e48 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -737,6 +737,12 @@
 
     if (r.nextBoolean()) {
       c.setMergeScheduler(new SerialMergeScheduler());
+    } else if (rarely(r)) {
+      int maxThreadCount = _TestUtil.nextInt(random(), 1, 4);
+      int maxMergeCount = _TestUtil.nextInt(random(), maxThreadCount, maxThreadCount+4);
+      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+      cms.setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
+      c.setMergeScheduler(cms);
     }
     if (r.nextBoolean()) {
       if (rarely(r)) {
@@ -808,6 +814,7 @@
     if (rarely(r)) {
       c.setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(c.getInfoStream()));
     }
+    c.setUseCompoundFile(r.nextBoolean());
     c.setReaderPooling(r.nextBoolean());
     c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
     return c;
@@ -831,20 +838,29 @@
 
   public static LogMergePolicy newLogMergePolicy(Random r) {
     LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
-    logmp.setUseCompoundFile(r.nextBoolean());
     logmp.setCalibrateSizeByDeletes(r.nextBoolean());
     if (rarely(r)) {
       logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 9));
     } else {
       logmp.setMergeFactor(_TestUtil.nextInt(r, 10, 50));
     }
-    logmp.setUseCompoundFile(r.nextBoolean());
-    logmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
-    if (rarely()) {
-      logmp.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0);
-    }
+    configureRandom(r, logmp);
     return logmp;
   }
+  
+  private static void configureRandom(Random r, MergePolicy mergePolicy) {
+    if (r.nextBoolean()) {
+      mergePolicy.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
+    } else {
+      mergePolicy.setNoCFSRatio(r.nextBoolean() ? 1.0 : 0.0);
+    }
+    
+    if (rarely()) {
+      mergePolicy.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0);
+    } else {
+      mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
+    }
+  }
 
   public static TieredMergePolicy newTieredMergePolicy(Random r) {
     TieredMergePolicy tmp = new TieredMergePolicy();
@@ -867,29 +883,25 @@
     } else {
       tmp.setSegmentsPerTier(_TestUtil.nextInt(r, 10, 50));
     }
-    tmp.setUseCompoundFile(r.nextBoolean());
-    tmp.setNoCFSRatio(0.1 + r.nextDouble()*0.8);
-    if (rarely()) {
-      tmp.setMaxCFSSegmentSizeMB(0.2 + r.nextDouble() * 2.0);
-    }
+    configureRandom(r, tmp);
     tmp.setReclaimDeletesWeight(r.nextDouble()*4);
     return tmp;
   }
 
-  public static LogMergePolicy newLogMergePolicy(boolean useCFS) {
-    LogMergePolicy logmp = newLogMergePolicy();
-    logmp.setUseCompoundFile(useCFS);
+  public static MergePolicy newLogMergePolicy(boolean useCFS) {
+    MergePolicy logmp = newLogMergePolicy();
+    logmp.setNoCFSRatio(useCFS ? 1.0 : 0.0);
     return logmp;
   }
 
-  public static LogMergePolicy newLogMergePolicy(boolean useCFS, int mergeFactor) {
+  public static MergePolicy newLogMergePolicy(boolean useCFS, int mergeFactor) {
     LogMergePolicy logmp = newLogMergePolicy();
-    logmp.setUseCompoundFile(useCFS);
+    logmp.setNoCFSRatio(useCFS ? 1.0 : 0.0);
     logmp.setMergeFactor(mergeFactor);
     return logmp;
   }
 
-  public static LogMergePolicy newLogMergePolicy(int mergeFactor) {
+  public static MergePolicy newLogMergePolicy(int mergeFactor) {
     LogMergePolicy logmp = newLogMergePolicy();
     logmp.setMergeFactor(mergeFactor);
     return logmp;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java
index fa7754e..32f2cc0 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java
@@ -88,12 +88,27 @@
   @Override
   public void testFinished(Description description) throws Exception {
     if (testFailed) {
-      reportAdditionalFailureInfo(description.getMethodName());
+      reportAdditionalFailureInfo(
+          stripTestNameAugmentations(
+              description.getMethodName()));
     }
     scope = LifecycleScope.SUITE;
     testFailed = false;
   }
 
+  /**
+   * The {@link Description} object in JUnit does not expose the actual test method;
+   * instead it has the concept of a unique "name" for a test. To run the same method
+   * repeatedly, randomizedtesting must make those "names" unique: it appends the
+   * current iteration and seeds to the test method's name. We strip this information here.
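+   * For example, a reported name like "testFoo {#2 seed=[ABC:123]}" (the augmented
+   * format shown here is illustrative) is reduced to "testFoo".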
+   */
+  private String stripTestNameAugmentations(String methodName) {
+    if (methodName != null) {
+      methodName = methodName.replaceAll("\\s*\\{.+?\\}", "");
+    }
+    return methodName;
+  }
+
   @Override
   public void testRunFinished(Result result) throws Exception {
     if (printDiagnosticsAfterClass || LuceneTestCase.VERBOSE) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
index 7f31d48..01cbbcd 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
@@ -84,6 +84,9 @@
 
     @Override
     public void message(String component, String message) {
+      if ("TP".equals(component)) {
+        return; // ignore test points!
+      }
       final String name;
       if (Thread.currentThread().getName().startsWith("TEST-")) {
         // The name of the main thread is way too
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
index 919f45d..9eccf27 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java
@@ -49,7 +49,12 @@
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType.NumericType;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.SortedDocValuesField;
 import org.apache.lucene.index.AtomicReader;
@@ -78,14 +83,15 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.FilteredQuery.FilterStrategy;
 import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.FilteredQuery.FilterStrategy;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.junit.Assert;
+
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
@@ -260,17 +266,6 @@
     }
   }
 
-  // NOTE: only works for TMP and LMP!!
-  public static void setUseCompoundFile(MergePolicy mp, boolean v) {
-    if (mp instanceof TieredMergePolicy) {
-      ((TieredMergePolicy) mp).setUseCompoundFile(v);
-    } else if (mp instanceof LogMergePolicy) {
-      ((LogMergePolicy) mp).setUseCompoundFile(v);
-    } else {
-      throw new IllegalArgumentException("cannot set compound file for MergePolicy " + mp);
-    }
-  }
-
   /** start and end are BOTH inclusive */
   public static int nextInt(Random r, int start, int end) {
     return RandomInts.randomIntBetween(r, start, end);
@@ -293,7 +288,11 @@
   }
 
   public static String randomSimpleString(Random r, int maxLength) {
-    final int end = nextInt(r, 0, maxLength);
+    return randomSimpleString(r, 0, maxLength);
+  }
+  
+  public static String randomSimpleString(Random r, int minLength, int maxLength) {
+    final int end = nextInt(r, minLength, maxLength);
     if (end == 0) {
       // allow 0 length
       return "";
@@ -319,7 +318,7 @@
   }
 
   public static String randomSimpleString(Random r) {
-    return randomSimpleString(r, 10);
+    return randomSimpleString(r, 0, 10);
   }
 
   /** Returns random string, including full unicode range. */
@@ -762,17 +761,17 @@
     if (mp instanceof LogMergePolicy) {
       LogMergePolicy lmp = (LogMergePolicy) mp;
       lmp.setMergeFactor(Math.min(5, lmp.getMergeFactor()));
-      lmp.setUseCompoundFile(true);
+      lmp.setNoCFSRatio(1.0);
     } else if (mp instanceof TieredMergePolicy) {
       TieredMergePolicy tmp = (TieredMergePolicy) mp;
       tmp.setMaxMergeAtOnce(Math.min(5, tmp.getMaxMergeAtOnce()));
       tmp.setSegmentsPerTier(Math.min(5, tmp.getSegmentsPerTier()));
-      tmp.setUseCompoundFile(true);
+      tmp.setNoCFSRatio(1.0);
     }
     MergeScheduler ms = w.getConfig().getMergeScheduler();
     if (ms instanceof ConcurrentMergeScheduler) {
-      ((ConcurrentMergeScheduler) ms).setMaxThreadCount(2);
-      ((ConcurrentMergeScheduler) ms).setMaxMergeCount(3);
+      // TODO: shouldn't this be even lower, since the default is 1?
+      ((ConcurrentMergeScheduler) ms).setMaxMergesAndThreads(3, 2);
     }
   }
 
@@ -879,6 +878,7 @@
       final Field field1 = (Field) f;
       final Field field2;
       final DocValuesType dvType = field1.fieldType().docValueType();
+      final NumericType numType = field1.fieldType().numericType();
       if (dvType != null) {
         switch(dvType) {
           case NUMERIC:
@@ -893,6 +893,23 @@
           default:
             throw new IllegalStateException("unknown Type: " + dvType);
         }
+      } else if (numType != null) {
+        switch (numType) {
+          case INT:
+            field2 = new IntField(field1.name(), field1.numericValue().intValue(), field1.fieldType());
+            break;
+          case FLOAT:
+            field2 = new FloatField(field1.name(), field1.numericValue().floatValue(), field1.fieldType());
+            break;
+          case LONG:
+            field2 = new LongField(field1.name(), field1.numericValue().longValue(), field1.fieldType());
+            break;
+          case DOUBLE:
+            field2 = new DoubleField(field1.name(), field1.numericValue().doubleValue(), field1.fieldType());
+            break;
+          default:
+            throw new IllegalStateException("unknown Type: " + numType);
+        }
       } else {
         field2 = new Field(field1.name(), field1.stringValue(), field1.fieldType());
       }
diff --git a/lucene/tools/build.xml b/lucene/tools/build.xml
index 35310bc..d9cbdc7 100644
--- a/lucene/tools/build.xml
+++ b/lucene/tools/build.xml
@@ -21,6 +21,8 @@
   <description>Lucene Tools</description>
 
   <property name="build.dir" location="../build/tools"/>
+  
+  <property name="rat.additional-includes" value="forbiddenApis/**,prettify/**"/>
 
   <import file="../common-build.xml"/>
 
diff --git a/lucene/tools/custom-tasks.xml b/lucene/tools/custom-tasks.xml
index 074059b..10e4b56 100644
--- a/lucene/tools/custom-tasks.xml
+++ b/lucene/tools/custom-tasks.xml
@@ -1,4 +1,21 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
 
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied.  See the License for the
+   specific language governing permissions and limitations
+   under the License.
+-->
 <project name="custom-tasks">
   <description>
     This file is designed for importing into a main build file, and not intended
@@ -26,7 +43,7 @@
 
     <!-- Typical version patterns. -->
     <replaceregex pattern="\.rc[0-9]+" replace="" flags="gi" />
-    <replaceregex pattern="\-(r)?([0-9\-\_\.])+(b(eta)?([0-9\-\.])*)?$" replace="" flags="gi" />
+    <replaceregex pattern="\-(r)?([0-9\-\_\.])+((b(eta)?)|((a(lpha)?))([0-9\-\.])*)?(\-tests)?$" replace="" flags="gi" />
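+    <!-- Illustrative examples (assuming a ".jar" extension already stripped by an earlier rule): "foo-1.0" -> "foo", "bar-2.0-beta" -> "bar", "baz-0.9-tests" -> "baz" -->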
 
     <!-- git hashcode pattern: it's always 40 chars, right? -->
     <replaceregex pattern="\-[a-z0-9]{40,40}$" replace="" flags="gi" />
diff --git a/lucene/tools/forbiddenApis/chars.txt b/lucene/tools/forbiddenApis/chars.txt
new file mode 100644
index 0000000..a5679cb
--- /dev/null
+++ b/lucene/tools/forbiddenApis/chars.txt
@@ -0,0 +1,17 @@
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+java.lang.Character#codePointBefore(char[],int) @ Implicit start offset is error-prone when the char[] is a buffer and the first chars are random chars
+java.lang.Character#codePointAt(char[],int) @ Implicit end offset is error-prone when the char[] is a buffer and the last chars are random chars
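+
+# For illustration only (not signatures): the bounded variants take an explicit
+# start/end offset, e.g. in Java:
+#   int cp  = Character.codePointAt(buffer, i, validLength);   // explicit end limit
+#   int cpb = Character.codePointBefore(buffer, i, validStart); // explicit start offset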
diff --git a/lucene/tools/junit4/tests.policy b/lucene/tools/junit4/tests.policy
index f8c4002..0933cab 100644
--- a/lucene/tools/junit4/tests.policy
+++ b/lucene/tools/junit4/tests.policy
@@ -59,6 +59,10 @@
   permission javax.management.MBeanPermission "*", "*";
   permission javax.management.MBeanServerPermission "*";
   permission javax.management.MBeanTrustPermission "*";
+  permission javax.security.auth.AuthPermission "*";
+  permission javax.security.auth.PrivateCredentialPermission "org.apache.hadoop.security.Credentials * \"*\"", "read";
+  permission java.security.SecurityPermission "putProviderProperty.SaslPlainServer";
+  permission java.security.SecurityPermission "insertProvider.SaslPlainServer";
   
   // TIKA uses BouncyCastle and that registers new provider for PDF parsing + MSOffice parsing. Maybe report as bug!
   permission java.security.SecurityPermission "putProviderProperty.BC";
diff --git a/lucene/tools/prettify/lang-apollo.js b/lucene/tools/prettify/lang-apollo.js
index 7098baf..a9e4597 100644
--- a/lucene/tools/prettify/lang-apollo.js
+++ b/lucene/tools/prettify/lang-apollo.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["com",/^#[^\n\r]*/,null,"#"],["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,null,'"']],[["kwd",/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,
 null],["typ",/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[ES]?BANK=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[!-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["apollo","agc","aea"]);
diff --git a/lucene/tools/prettify/lang-css.js b/lucene/tools/prettify/lang-css.js
index 041e1f5..e937457 100644
--- a/lucene/tools/prettify/lang-css.js
+++ b/lucene/tools/prettify/lang-css.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\f\r ]+/,null," \t\r\n"]],[["str",/^"(?:[^\n\f\r"\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*"/,null],["str",/^'(?:[^\n\f\r'\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*'/,null],["lang-css-str",/^url\(([^"')]*)\)/i],["kwd",/^(?:url|rgb|!important|@import|@page|@media|@charset|inherit)(?=[^\w-]|$)/i,null],["lang-css-kw",/^(-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*)\s*:/i],["com",/^\/\*[^*]*\*+(?:[^*/][^*]*\*+)*\//],["com",
 /^(?:<\!--|--\>)/],["lit",/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],["lit",/^#[\da-f]{3,6}/i],["pln",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i],["pun",/^[^\s\w"']+/]]),["css"]);PR.registerLangHandler(PR.createSimpleLexer([],[["kwd",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i]]),["css-kw"]);PR.registerLangHandler(PR.createSimpleLexer([],[["str",/^[^"')]+/]]),["css-str"]);
diff --git a/lucene/tools/prettify/lang-hs.js b/lucene/tools/prettify/lang-hs.js
index 9d77b08..0858e5c 100644
--- a/lucene/tools/prettify/lang-hs.js
+++ b/lucene/tools/prettify/lang-hs.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t-\r ]+/,null,"\t\n\r "],["str",/^"(?:[^\n\f\r"\\]|\\[\S\s])*(?:"|$)/,null,'"'],["str",/^'(?:[^\n\f\r'\\]|\\[^&])'?/,null,"'"],["lit",/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)/i,null,"0123456789"]],[["com",/^(?:--+[^\n\f\r]*|{-(?:[^-]|-+[^}-])*-})/],["kwd",/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^\d'A-Za-z]|$)/,
 null],["pln",/^(?:[A-Z][\w']*\.)*[A-Za-z][\w']*/],["pun",/^[^\d\t-\r "'A-Za-z]+/]]),["hs"]);
diff --git a/lucene/tools/prettify/lang-lisp.js b/lucene/tools/prettify/lang-lisp.js
index 02a30e8..dc7fa01 100644
--- a/lucene/tools/prettify/lang-lisp.js
+++ b/lucene/tools/prettify/lang-lisp.js
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 var a=null;
 PR.registerLangHandler(PR.createSimpleLexer([["opn",/^\(+/,a,"("],["clo",/^\)+/,a,")"],["com",/^;[^\n\r]*/,a,";"],["pln",/^[\t\n\r \xa0]+/,a,"\t\n\r \xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,a,'"']],[["kwd",/^(?:block|c[ad]+r|catch|con[ds]|def(?:ine|un)|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,a],
 ["lit",/^[+-]?(?:[#0]x[\da-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[de][+-]?\d+)?)/i],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[_a-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/i],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["cl","el","lisp","scm"]);
diff --git a/lucene/tools/prettify/lang-lua.js b/lucene/tools/prettify/lang-lua.js
index e83a3c4..f02011e 100644
--- a/lucene/tools/prettify/lang-lua.js
+++ b/lucene/tools/prettify/lang-lua.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$))/,null,"\"'"]],[["com",/^--(?:\[(=*)\[[\S\s]*?(?:]\1]|$)|[^\n\r]*)/],["str",/^\[(=*)\[[\S\s]*?(?:]\1]|$)/],["kwd",/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],
 ["pln",/^[_a-z]\w*/i],["pun",/^[^\w\t\n\r \xa0][^\w\t\n\r "'+=\xa0-]*/]]),["lua"]);
diff --git a/lucene/tools/prettify/lang-ml.js b/lucene/tools/prettify/lang-ml.js
index 6df02d7..6d17e8b 100644
--- a/lucene/tools/prettify/lang-ml.js
+++ b/lucene/tools/prettify/lang-ml.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["com",/^#(?:if[\t\n\r \xa0]+(?:[$_a-z][\w']*|``[^\t\n\r`]*(?:``|$))|else|endif|light)/i,null,"#"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])(?:'|$))/,null,"\"'"]],[["com",/^(?:\/\/[^\n\r]*|\(\*[\S\s]*?\*\))/],["kwd",/^(?:abstract|and|as|assert|begin|class|default|delegate|do|done|downcast|downto|elif|else|end|exception|extern|false|finally|for|fun|function|if|in|inherit|inline|interface|internal|lazy|let|match|member|module|mutable|namespace|new|null|of|open|or|override|private|public|rec|return|static|struct|then|to|true|try|type|upcast|use|val|void|when|while|with|yield|asr|land|lor|lsl|lsr|lxor|mod|sig|atomic|break|checked|component|const|constraint|constructor|continue|eager|event|external|fixed|functor|global|include|method|mixin|object|parallel|process|protected|pure|sealed|trait|virtual|volatile)\b/],
 ["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^(?:[_a-z][\w']*[!#?]?|``[^\t\n\r`]*(?:``|$))/i],["pun",/^[^\w\t\n\r "'\xa0]+/]]),["fs","ml"]);
diff --git a/lucene/tools/prettify/lang-proto.js b/lucene/tools/prettify/lang-proto.js
index f006ad8..741a438 100644
--- a/lucene/tools/prettify/lang-proto.js
+++ b/lucene/tools/prettify/lang-proto.js
@@ -1 +1,17 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.sourceDecorator({keywords:"bytes,default,double,enum,extend,extensions,false,group,import,max,message,option,optional,package,repeated,required,returns,rpc,service,syntax,to,true",types:/^(bool|(double|s?fixed|[su]?int)(32|64)|float|string)\b/,cStyleComments:!0}),["proto"]);
diff --git a/lucene/tools/prettify/lang-sql.js b/lucene/tools/prettify/lang-sql.js
index da705b0..09d6558 100644
--- a/lucene/tools/prettify/lang-sql.js
+++ b/lucene/tools/prettify/lang-sql.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r Â\xa0"],["str",/^(?:"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*')/,null,"\"'"]],[["com",/^(?:--[^\n\r]*|\/\*[\S\s]*?(?:\*\/|$))/],["kwd",/^(?:add|all|alter|and|any|as|asc|authorization|backup|begin|between|break|browse|bulk|by|cascade|case|check|checkpoint|close|clustered|coalesce|collate|column|commit|compute|constraint|contains|containstable|continue|convert|create|cross|current|current_date|current_time|current_timestamp|current_user|cursor|database|dbcc|deallocate|declare|default|delete|deny|desc|disk|distinct|distributed|double|drop|dummy|dump|else|end|errlvl|escape|except|exec|execute|exists|exit|fetch|file|fillfactor|for|foreign|freetext|freetexttable|from|full|function|goto|grant|group|having|holdlock|identity|identitycol|identity_insert|if|in|index|inner|insert|intersect|into|is|join|key|kill|left|like|lineno|load|match|merge|national|nocheck|nonclustered|not|null|nullif|of|off|offsets|on|open|opendatasource|openquery|openrowset|openxml|option|or|order|outer|over|percent|plan|precision|primary|print|proc|procedure|public|raiserror|read|readtext|reconfigure|references|replication|restore|restrict|return|revoke|right|rollback|rowcount|rowguidcol|rule|save|schema|select|session_user|set|setuser|shutdown|some|statistics|system_user|table|textsize|then|to|top|tran|transaction|trigger|truncate|tsequal|union|unique|update|updatetext|use|user|using|values|varying|view|waitfor|when|where|while|with|writetext)(?=[^\w-]|$)/i,
 null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^[_a-z][\w-]*/i],["pun",/^[^\w\t\n\r "'\xa0][^\w\t\n\r "'+\xa0-]*/]]),["sql"]);
diff --git a/lucene/tools/prettify/lang-vb.js b/lucene/tools/prettify/lang-vb.js
index 07506b0..dad809e 100644
--- a/lucene/tools/prettify/lang-vb.js
+++ b/lucene/tools/prettify/lang-vb.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0\u2028\u2029]+/,null,"\t\n\r \xa0\u2028\u2029"],["str",/^(?:["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})(?:["\u201c\u201d]c|$)|["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})*(?:["\u201c\u201d]|$))/i,null,'"“”'],["com",/^['\u2018\u2019].*/,null,"'‘’"]],[["kwd",/^(?:addhandler|addressof|alias|and|andalso|ansi|as|assembly|auto|boolean|byref|byte|byval|call|case|catch|cbool|cbyte|cchar|cdate|cdbl|cdec|char|cint|class|clng|cobj|const|cshort|csng|cstr|ctype|date|decimal|declare|default|delegate|dim|directcast|do|double|each|else|elseif|end|endif|enum|erase|error|event|exit|finally|for|friend|function|get|gettype|gosub|goto|handles|if|implements|imports|in|inherits|integer|interface|is|let|lib|like|long|loop|me|mod|module|mustinherit|mustoverride|mybase|myclass|namespace|new|next|not|notinheritable|notoverridable|object|on|option|optional|or|orelse|overloads|overridable|overrides|paramarray|preserve|private|property|protected|public|raiseevent|readonly|redim|removehandler|resume|return|select|set|shadows|shared|short|single|static|step|stop|string|structure|sub|synclock|then|throw|to|try|typeof|unicode|until|variant|wend|when|while|with|withevents|writeonly|xor|endif|gosub|let|variant|wend)\b/i,
 null],["com",/^rem.*/i],["lit",/^(?:true\b|false\b|nothing\b|\d+(?:e[+-]?\d+[dfr]?|[dfilrs])?|(?:&h[\da-f]+|&o[0-7]+)[ils]?|\d*\.\d+(?:e[+-]?\d+)?[dfr]?|#\s+(?:\d+[/-]\d+[/-]\d+(?:\s+\d+:\d+(?::\d+)?(\s*(?:am|pm))?)?|\d+:\d+(?::\d+)?(\s*(?:am|pm))?)\s+#)/i],["pln",/^(?:(?:[a-z]|_\w)\w*|\[(?:[a-z]|_\w)\w*])/i],["pun",/^[^\w\t\n\r "'[\]\xa0\u2018\u2019\u201c\u201d\u2028\u2029]+/],["pun",/^(?:\[|])/]]),["vb","vbs"]);
diff --git a/lucene/tools/prettify/lang-wiki.js b/lucene/tools/prettify/lang-wiki.js
index 9b0b448..d43b74f 100644
--- a/lucene/tools/prettify/lang-wiki.js
+++ b/lucene/tools/prettify/lang-wiki.js
@@ -1,2 +1,18 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\d\t a-gi-z\xa0]+/,null,"\t Â\xa0abcdefgijklmnopqrstuvwxyz0123456789"],["pun",/^[*=[\]^~]+/,null,"=*~^[]"]],[["lang-wiki.meta",/(?:^^|\r\n?|\n)(#[a-z]+)\b/],["lit",/^[A-Z][a-z][\da-z]+[A-Z][a-z][^\W_]+\b/],["lang-",/^{{{([\S\s]+?)}}}/],["lang-",/^`([^\n\r`]+)`/],["str",/^https?:\/\/[^\s#/?]*(?:\/[^\s#?]*)?(?:\?[^\s#]*)?(?:#\S*)?/i],["pln",/^(?:\r\n|[\S\s])[^\n\r#*=A-[^`h{~]*/]]),["wiki"]);
 PR.registerLangHandler(PR.createSimpleLexer([["kwd",/^#[a-z]+/i,null,"#"]],[]),["wiki.meta"]);
diff --git a/lucene/tools/prettify/prettify.css b/lucene/tools/prettify/prettify.css
index d44b3a2..98f5851 100644
--- a/lucene/tools/prettify/prettify.css
+++ b/lucene/tools/prettify/prettify.css
@@ -1 +1,17 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 .pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee}
\ No newline at end of file
diff --git a/lucene/tools/prettify/prettify.js b/lucene/tools/prettify/prettify.js
index eef5ad7..dc35c65 100644
--- a/lucene/tools/prettify/prettify.js
+++ b/lucene/tools/prettify/prettify.js
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 var q=null;window.PR_SHOULD_USE_CONTINUATION=!0;
 (function(){function L(a){function m(a){var f=a.charCodeAt(0);if(f!==92)return f;var b=a.charAt(1);return(f=r[b])?f:"0"<=b&&b<="7"?parseInt(a.substring(1),8):b==="u"||b==="x"?parseInt(a.substring(2),16):a.charCodeAt(1)}function e(a){if(a<32)return(a<16?"\\x0":"\\x")+a.toString(16);a=String.fromCharCode(a);if(a==="\\"||a==="-"||a==="["||a==="]")a="\\"+a;return a}function h(a){for(var f=a.substring(1,a.length-1).match(/\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\S\s]|[^\\]/g),a=
 [],b=[],o=f[0]==="^",c=o?1:0,i=f.length;c<i;++c){var j=f[c];if(/\\[bdsw]/i.test(j))a.push(j);else{var j=m(j),d;c+2<i&&"-"===f[c+1]?(d=m(f[c+2]),c+=2):d=j;b.push([j,d]);d<65||j>122||(d<65||j>90||b.push([Math.max(65,j)|32,Math.min(d,90)|32]),d<97||j>122||b.push([Math.max(97,j)&-33,Math.min(d,122)&-33]))}}b.sort(function(a,f){return a[0]-f[0]||f[1]-a[1]});f=[];j=[NaN,NaN];for(c=0;c<b.length;++c)i=b[c],i[0]<=j[1]+1?j[1]=Math.max(j[1],i[1]):f.push(j=i);b=["["];o&&b.push("^");b.push.apply(b,a);for(c=0;c<
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 51eff5b..1980ed8 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -24,7 +24,7 @@
 
 Versions of Major Components
 ---------------------
-Apache Tika 1.3
+Apache Tika 1.4
 Carrot2 3.6.2
 Velocity 1.7 and Velocity Tools 2.0
 Apache UIMA 2.3.1
@@ -33,7 +33,8 @@
 Upgrading from Solr 4.x
 ----------------------
 
-TBD...
+The "file" attribute of infoStream in solrconfig.xml is removed. Control this 
+via your logging configuration (org.apache.solr.update.LoggingInfoStream) instead.
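+For example, assuming the log4j configuration that ships with the Solr example,
+a logger entry like the following controls the infoStream output:
+  log4j.logger.org.apache.solr.update.LoggingInfoStream=INFO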
 
 Detailed Change List
 ----------------------
@@ -44,11 +45,15 @@
 * SOLR-4622: Hardcoded SolrCloud defaults for hostContext and hostPort that
   were deprecated in 4.3 have been removed completely. (hossman)
 
+* SOLR-4792: Stop shipping a .war. (Robert Muir)
+
+* SOLR-4948: Tidied up CoreContainer construction logic.  (Alan Woodward)
+
 ==================  4.4.0 ==================
 
 Versions of Major Components
 ---------------------
-Apache Tika 1.3
+Apache Tika 1.4
 Carrot2 3.6.2
 Velocity 1.7 and Velocity Tools 2.0
 Apache UIMA 2.3.1
@@ -57,9 +62,30 @@
 Upgrading from Solr 4.3.0
 ----------------------
 
+* TieredMergePolicy and the various subtypes of LogMergePolicy no longer have 
+  an explicit "setUseCompoundFile" method.  Instead the behavior of new 
+  segments is determined by the IndexWriter configuration, and the MergePolicy 
+  is only consulted to determine whether merged segments should use the compound 
+  file format (based on the value of "setNoCFSRatio").  If you have explicitly 
+  configured one of these classes using <mergePolicy> and included an init arg 
+  like this...
+     <bool name="useCompoundFile">true</bool>
+  ...this will now be treated as if you specified...
+     <useCompoundFile>true</useCompoundFile> 
+  ...directly on the <indexConfig> (overriding any value already set using that
+  syntax), and a warning will be logged asking you to update your configuration.  Users 
+  with an explicitly declared <mergePolicy> are encouraged to review the 
+  current javadocs for their MergePolicy subclass and review their configured 
+  options carefully.  See SOLR-4941, SOLR-4934 and LUCENE-5038 for more 
+  information.
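+  As an illustrative sketch (the class and values shown are examples, not the
+  only valid form), the equivalent new-style configuration is:
+     <indexConfig>
+       <useCompoundFile>true</useCompoundFile>
+       <mergePolicy class="org.apache.lucene.index.TieredMergePolicy">
+         <double name="noCFSRatio">1.0</double>
+       </mergePolicy>
+     </indexConfig>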
+
 * SOLR-4778: The signature of LogWatcher.registerListener has changed, from
   (ListenerConfig, CoreContainer) to (ListenerConfig).  Users implementing their
   own LogWatcher classes will need to change their code accordingly.
+
+* LUCENE-5063: ByteField and ShortField have been deprecated and will be removed
+  in 5.0. If you are still using these field types, you should migrate your
+  fields to TrieIntField.
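+  An illustrative schema.xml migration (the type name here is an example only):
+     <!-- before: <fieldType name="byteType" class="solr.ByteField"/> -->
+     <fieldType name="byteType" class="solr.TrieIntField" precisionStep="0"/>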
   
 Detailed Change List
 ----------------------
@@ -69,8 +95,9 @@
 
 * SOLR-3251: Dynamically add fields to schema. (Steve Rowe, Robert Muir, yonik)   
 
-* SOLR-4761: Add option to plugin a merged segment warmer into solrconfig.xml
-  (Mark Miller, Mike McCandless, Robert Muir)
+* SOLR-4761, SOLR-4976: Add option to plug a merged segment warmer into solrconfig.xml. 
+  Info about segments warmed in the background is available via infostream.
+  (Mark Miller, Ryan Ernst, Mike McCandless, Robert Muir)
 
 * SOLR-3240: Add "spellcheck.collateMaxCollectDocs" option so that when testing
   potential Collations against the index, SpellCheckComponent will only collect
@@ -86,6 +113,43 @@
 
 * SOLR-4228: SolrJ's SolrPing object has new methods for ping, enable, and
   disable. (Shawn Heisey, hossman, Steve Rowe)
+  
+* SOLR-4893: Extend FieldMutatingUpdateProcessor.ConfigurableFieldNameSelector
+  to enable checking whether a field matches any schema field.  To select field
+  names that don't match any fields or dynamic fields in the schema, add
+  <bool name="fieldNameMatchesSchemaField">false</bool> to an update
+  processor's configuration in solrconfig.xml.  (Steve Rowe, hossman)
+
+* SOLR-4921: Admin UI now supports adding documents to Solr (gsingers, steffkes)
+
+* SOLR-4916: Add support to write and read Solr index files and transaction log
+  files to and from HDFS. (phunt, Mark Miller, Greg Chanan)
+  
+* SOLR-4892: Add FieldMutatingUpdateProcessorFactory subclasses 
+  Parse{Date,Integer,Long,Float,Double,Boolean}UpdateProcessorFactory. These
+  factories have a default selector that matches all fields that either don't
+  match any schema field, or are in the schema with the corresponding
+  typeClass. If they see a value that is not a CharSequence, or can't parse
+  the value, they leave it as is. For multi-valued fields, these processors
+  will not convert any values unless all are first successfully parsed, or 
+  already are instances of the target class. Ordering the processors as, e.g.,
+  [Boolean, Long, Double, Date] allows values such as ["2", "5", "8.6"] to be
+  left alone by the Boolean and Long processors but converted by the Double
+  processor.  (Steve Rowe, hossman)
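+  A sketch of such a chain in solrconfig.xml (the chain name is illustrative):
+     <updateRequestProcessorChain name="parse-types">
+       <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+       <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+       <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+       <processor class="solr.ParseDateFieldUpdateProcessorFactory"/>
+       <processor class="solr.RunUpdateProcessorFactory"/>
+     </updateRequestProcessorChain>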
+
+* SOLR-4972: Add PUT command to ZkCli tool. (Roman Shaposhnik via Mark Miller)
+
+* SOLR-4973: Adding getter method for defaultCollection on CloudSolrServer.
+  (Furkan KAMACI via Mark Miller)
+  
+* SOLR-4897: Add solr/example/example-schemaless/, an example config set
+  for schemaless mode. (Steve Rowe)
+
+* SOLR-4655: Add option to have Overseer assign generic node names so that
+  new addresses can host shards without naming confusion. (Mark Miller, Anshum Gupta)
+
+* SOLR-4977: Add option to send IndexWriter's infostream to the logging system.
+  (Ryan Ernst via Robert Muir)
 
 Bug Fixes
 ----------------------
@@ -114,6 +178,93 @@
 * SOLR-4863: Removed non-existent attribute sourceId from dynamic JMX stats
   to fix AttributeNotFoundException (suganuma, hossman via shalin)
 
+* SOLR-4891: JsonLoader should preserve field value types from the JSON content stream.
+  (Steve Rowe)
+ 
+* SOLR-4805: SolrCore#reload should not call preRegister and publish a DOWN state to
+  ZooKeeper. (Mark Miller, Jared Rodriguez)
+
+* SOLR-4899: When reconnecting after ZooKeeper expiration, we need to be willing to wait 
+  forever, not just for 30 seconds. (Mark Miller)
+
+* SOLR-4920: JdbcDataSource incorrectly suppresses exceptions when retrieving a connection from
+  a JNDI context and falls back to trying to use DriverManager to obtain a connection. Additionally,
+  if a SQLException is thrown while initializing a connection, such as in setAutoCommit(), the
+  connection will not be closed. (Chris Eldredge via shalin)
+
+* SOLR-4915: The root cause should be returned to the user when a SolrCore create call fails.
+  (Mark Miller)
+
+* SOLR-4925: Collection create throws NPE when 'numShards' param is missing (Noble Paul)
+
+* SOLR-4910: persisting solr.xml is broken. More stringent testing of persistence
+  turned up and fixed a number of bugs. Among them are
+  > don't persist implicit properties
+  > should persist zkHost in the <solr> tag (user's list)
+  > reloading a core that has transient="true" returned an error. reload should load
+    a transient core if it's not yet loaded.
+  > No longer persisting loadOnStartup or transient core properties if they were not
+    specified in the original solr.xml
+  > Testing flushed out the fact that you couldn't swap a core marked transient=true
+    loadOnStartup=false because it hadn't been loaded yet.
+  > SOLR-4862, CREATE fails to persist schema, config, and dataDir
+  > SOLR-4363, not persisting coreLoadThreads in <solr> tag
+  > SOLR-3900, logWatcher properties not persisted
+  > SOLR-4850, cores defined as loadOnStartup=true, transient=false can't be searched
+  (Erick Erickson)
+
+* SOLR-4923: Commits to non leaders as part of a request that also contain updates
+  can execute out of order. (hossman, Ricardo Merizalde, Mark Miller)
+
+* SOLR-4932: persisting solr.xml saves some parameters it shouldn't when they weren't
+  defined in the original. Benign since the default values are saved, but still incorrect.
+  (Erick Erickson, thanks Shawn Heisey for helping test!)
+
+* SOLR-4934, SOLR-4941: Fix handling of <mergePolicy> init arg 
+  "useCompoundFile" needed after changes in LUCENE-5038 (hossman)
+
+* SOLR-4456: Admin UI: Displays dashboard even if Solr is down (steffkes)
+
+* SOLR-4949: UI Analysis page dropping characters from input box (steffkes)
+
+* SOLR-4960: Fix race conditions in shutdown of CoreContainer
+  and getCore that could cause a request to attempt to use a core that
+  has shut down. (yonik)
+
+* SOLR-4926: Fixed rare replication bug that normally only manifested when 
+  using compound file format. (yonik, Mark Miller)
+  
+* SOLR-4974: Outgrowth of SOLR-4960 that includes transient cores and pending cores
+  (Erick Erickson)
+
+* SOLR-3369: shards.tolerant=true is broken for group queries
+  (Russell Black, Martijn van Groningen, Jabouille jean Charles, Ryan McKinley via shalin)
+
+* SOLR-4452: Hunspell stemmer should not merge duplicate dictionary entries (janhoy)
+
+* SOLR-5000: ManagedIndexSchema doesn't persist uniqueKey tag after calling addFields
+  method. (Jun Ohtani, Steve Rowe)
+
+Optimizations
+----------------------
+
+* SOLR-4923: Commit to all nodes in a collection in parallel rather than locally and
+  then to all other nodes. (hossman, Ricardo Merizalde, Mark Miller)
+
+* SOLR-3838: Admin UI - Multiple filter queries are not supported in Query UI (steffkes)
+
+* SOLR-4719 : Admin UI - Default to wt=json on Query-Screen (steffkes)
+
+* SOLR-4611: Admin UI - Analysis-Urls with empty parameters create empty result table
+  (steffkes)
+
+* SOLR-4955: Admin UI - Show address bar on top for Schema + Config (steffkes)
+
+* SOLR-4412: New parameter langid.lcmap to map detected language code to be placed 
+  in "language" field (janhoy)
+
+* SOLR-4815: Admin-UI - DIH: Let "commit" be checked by default (steffkes)
+
 Other Changes
 ----------------------
 
@@ -142,6 +293,19 @@
 
 * SOLR-4448: Allow the solr internal load balancer to be more easily pluggable.
   (Philip Hoy via Robert Muir)
+  
+* SOLR-4224: Refactor JavaBinCodec input stream definition to enhance reuse.
+  (phunt via Mark Miller)
+
+* SOLR-4931: SolrDeletionPolicy onInit and onCommit methods changed to override
+  exact signatures (with generics) from IndexDeletionPolicy (shalin)
+
+* SOLR-4942: test improvements to randomize use of compound files (hossman)
+
+* SOLR-4966: CSS, JS and other files in webapp without license (uschindler,
+  steffkes)
+
+* SOLR-4986: Upgrade to Tika 1.4 (Markus Jelsma via janhoy)
 
 ==================  4.3.1 ==================
 
@@ -209,6 +373,26 @@
 
 * SOLR-4867: Admin UI - setting loglevel on root throws RangeError (steffkes)
 
+* SOLR-4870: RecentUpdates.update() does not increment numUpdates loop counter
+  (Alexey Kudinov via shalin)
+
+* SOLR-4877, LUCENE-5023: Removed SolrIndexSearcher#getDocSetNC()'s special
+  case for handling TermQuery to prevent NullPointerException if reader does
+  not have fields.  (Bao Yang Yang, Uwe Schindler)
+
+* SOLR-4881: Fix DocumentAnalysisRequestHandler to correctly use
+  EmptyEntityResolver to prevent loading of external entities like
+  UpdateRequestHandler does.  (Hossman, Uwe Schindler)
+
+* SOLR-4858: SolrCore reloading was broken when the UpdateLog
+  was enabled.  (Hossman, Anshum Gupta, Alexey Serba, Mark Miller, yonik)
+
+* SOLR-4853: Fixed SolrJettyTestBase so it may be reused by end users
+  (hossman)
+
+* SOLR-4744: Update failure on sub shard is not propagated to clients by parent
+  shard (Anshum Gupta, yonik, shalin)
+
 Other Changes
 ----------------------
 
diff --git a/solr/NOTICE.txt b/solr/NOTICE.txt
index 7078611..5252faa 100644
--- a/solr/NOTICE.txt
+++ b/solr/NOTICE.txt
@@ -11,6 +11,8 @@
   - Apache Lucene Java
   - Apache Commons
   - Apache Geronimo (stax API)
+  - Apache Blur
+  - Apache Hadoop
 
 This product includes the JQuery JavaScript library created by John Resig.
 Copyright (c) 2010 John Resig, http://jquery.com/
@@ -21,6 +23,42 @@
 This product includes the highlight.js Javascript library created by Ivan Sagalaev
 Copyright (c) 2006, Ivan Sagalaev, https://github.com/isagalaev/highlight.js
 
+This product includes the ZeroClipboard.js Javascript library created by Jon Rohan, James M. Greene
+Copyright (c) 2012 Jon Rohan, James M. Greene, https://github.com/zeroclipboard/ZeroClipboard
+
+This product includes the Chosen Javascript library created by Patrick Filler
+Copyright (c) 2011-2013 by Harvest, https://github.com/harvesthq/chosen
+
+This product includes jquery.ajaxfileupload.js Javascript library created by Jordan Feldstein
+Copyright (c) 2011 Jordan Feldstein, https://github.com/jfeldstein/jQuery.AjaxFileUpload.js
+
+This product includes jquery.blockUI.js Javascript library created by Mike Alsup
+Copyright (c) 2007-2013 M. Alsup https://github.com/malsup/blockui/
+
+This product includes jquery.cookie.js Javascript library created by Klaus Hartl
+Copyright (c) 2013 Klaus Hartl, https://github.com/carhartl/jquery-cookie
+
+This product includes jquery.form Javascript library created by Mike Alsup
+Copyright 2006-2013 (c) M. Alsup, https://github.com/malsup/form/
+
+This product includes the jstree Javascript library created by Ivan Bozhanov
+Copyright (c) 2013 Ivan Bozhanov, https://github.com/vakata/jstree
+
+This product includes the Sammy.js Javascript library created by Aaron Quint
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC, https://github.com/quirkey/sammy
+
+This product includes jquery.timeago.js Javascript library by Ryan McGeary
+Copyright (c) 2008-2013, Ryan McGeary, https://github.com/rmm5t/jquery-timeago
+
+This product includes linker.js Javascript library created by Michalis Tzikas & Vasilis Lolos
+Copyright (C) 2011 by Michalis Tzikas & Vasilis Lolos, https://github.com/lolos/jquery-Linker/
+
+This product includes require.js Javascript library created by James Burke
+Copyright (C) 2010-2013 James Burke, https://github.com/jrburke/requirejs
+
+This product includes fugue icons created by Yusuke Kamiyamane
+Copyright (C) 2013 Yusuke Kamiyamane, https://github.com/yusukekamiyamane/fugue-icons
+
 stax-utils library: https://stax-utils.dev.java.net/
 Copyright (c) 2004, Christian Niles, unit12.net
 Copyright (c) 2004, Sun Microsystems, Inc.
@@ -36,6 +74,9 @@
 HSQL Database (HSQLDB): http://hsqldb.org/
 License: http://hsqldb.org/web/hsqlLicense.html
 
+Jersey Core: https://jersey.java.net/
+License: Common Development and Distribution License (CDDL) v1.0 (https://glassfish.dev.java.net/public/CDDLv1.0.html)
+
 =========================================================================
 ==  Apache Lucene Notice                                               ==
 =========================================================================
@@ -72,6 +113,9 @@
 The class org.apache.lucene.util.WeakIdentityMap was derived from
 the Apache CXF project and is Apache License 2.0.
 
+The HdfsDirectory and BlockDirectory were derived from
+the Apache Blur incubating project and are Apache License 2.0.
+
 The Google Code Prettify is Apache License 2.0.
 See http://code.google.com/p/google-code-prettify/
 
@@ -510,3 +554,11 @@
 This product contains software developed by the Restlet project.
 
 See http://www.restlet.org/
+
+=========================================================================
+==     Protocol Buffers Notice                                         ==
+=========================================================================
+
+Protocol Buffers - Google's data interchange format
+Copyright 2008 Google Inc.
+http://code.google.com/apis/protocolbuffers/
diff --git a/solr/README.txt b/solr/README.txt
index 2872300..13f4ca3 100644
--- a/solr/README.txt
+++ b/solr/README.txt
@@ -41,14 +41,10 @@
 
 example/
   A self-contained example Solr instance, complete with a sample
-  configuration, documents to index, and the Jetty Servlet container.
+  configuration and documents to index.
   Please see example/README.txt for information about running this
   example.
 
-dist/solr-XX.war
-  The Apache Solr Application.  Deploy this WAR file to any servlet
-  container to run Apache Solr.
-
 dist/solr-<component>-XX.jar
   The Apache Solr libraries.  To compile Apache Solr Plugins,
   one or more of these will be required.  The core library is
diff --git a/solr/build.xml b/solr/build.xml
index 7875315..8de080e 100644
--- a/solr/build.xml
+++ b/solr/build.xml
@@ -25,7 +25,7 @@
     <echo message="And for developers:"/>
     <echo message="Use 'ant clean' to clean compiled files." />
     <echo message="Use 'ant compile' to compile the source code." />
-    <echo message="Use 'ant dist' to build the project WAR and JAR files." />
+    <echo message="Use 'ant dist' to build the project JAR files." />
     <echo message="Use 'ant documentation' to build documentation." />
     <echo message="Use 'ant generate-maven-artifacts' to generate maven artifacts." />
     <echo message="Use 'ant package' to generate zip, tgz for distribution." />
@@ -55,7 +55,7 @@
     </delete>
     <echo>See ${example}/README.txt for how to run the Solr example configuration.</echo>
   </target>
-
+  
   <target name="run-example" depends="example"
           description="Run Solr interactively, via Jetty.  -Dexample.debug=true to enable JVM debugger">
     <property name="example.solr.home" location="example/solr"/>
@@ -266,6 +266,7 @@
       <bundledSignatures name="commons-io-unsafe-${commons-io.version}"/>
       <signaturesFileSet dir="${common.dir}/tools/forbiddenApis">
         <include name="executors.txt" />
+        <include name="chars.txt" />
         <include name="servlet-api.txt" />
       </signaturesFileSet>
       <fileset dir="${basedir}/build">
@@ -291,12 +292,13 @@
 
   <!-- rat sources -->
   <!-- rat-sources-typedef is *not* a useless dependency. do not remove -->
-  <target name="rat-sources" depends="rat-sources-typedef">
+  <target name="rat-sources" depends="rat-sources-typedef,common.rat-sources">
     <subant target="rat-sources" inheritall="false" >
       <propertyset refid="uptodate.and.compiled.properties"/>
       <fileset dir="core" includes="build.xml"/>
       <fileset dir="solrj" includes="build.xml"/>
       <fileset dir="test-framework" includes="build.xml"/>
+      <fileset dir="webapp" includes="build.xml"/>
     </subant>
     <contrib-crawl target="rat-sources" failonerror="true"/>
   </target>
@@ -476,7 +478,7 @@
                   includes="example/**/*.sh example/**/bin/" />
       <tarfileset dir="."
                   prefix="${fullnamever}"
-                  includes="dist/*.jar dist/*.war 
+                  includes="dist/*.jar
                             dist/solrj-lib/*
                             dist/test-framework/**"
                   excludes="**/*.tgz **/*.zip **/*.md5 **/*src*.jar **/*docs*.jar **/*.sha1" />
diff --git a/solr/cloud-dev/solrcloud-start.sh b/solr/cloud-dev/solrcloud-start.sh
index a8940ec..0960f96 100644
--- a/solr/cloud-dev/solrcloud-start.sh
+++ b/solr/cloud-dev/solrcloud-start.sh
@@ -25,7 +25,7 @@
 cp -r -f example example5
 cp -r -f example example6
 
-java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*:example/lib/ext/" org.apache.solr.cloud.ZkCLI -cmd bootstrap -zkhost 127.0.0.1:9983 -solrhome example/solr -runzk 8983
+java -classpath "example/solr-webapp/webapp/WEB-INF/lib/*:example/lib/ext/*" org.apache.solr.cloud.ZkCLI -cmd bootstrap -zkhost 127.0.0.1:9983 -solrhome example/solr -runzk 8983
 
 cd example
 java -DzkRun -DnumShards=2 -DSTOP.PORT=7983 -DSTOP.KEY=key -jar start.jar 1>example.log 2>&1 &
diff --git a/solr/common-build.xml b/solr/common-build.xml
index cc7336b..c42e220 100644
--- a/solr/common-build.xml
+++ b/solr/common-build.xml
@@ -96,6 +96,12 @@
 
   <path id="solr.test.base.classpath">
     <pathelement path="${common-solr.dir}/build/solr-test-framework/classes/java"/>
+    <fileset dir="${common-solr.dir}/test-framework/lib">
+      <include name="*.jar"/>
+      <exclude name="junit-*.jar" />
+      <exclude name="randomizedtesting-runner-*.jar" />
+      <exclude name="ant*.jar" />
+    </fileset>
   	<pathelement path="${build.dir}/test-files"/>
   	<path refid="test.base.classpath"/>
   </path>
diff --git a/solr/contrib/analysis-extras/ivy.xml b/solr/contrib/analysis-extras/ivy.xml
index aee7c74..597f606 100644
--- a/solr/contrib/analysis-extras/ivy.xml
+++ b/solr/contrib/analysis-extras/ivy.xml
@@ -20,9 +20,9 @@
     <info organisation="org.apache.solr" module="analysis-extras"/>
     <dependencies>
       <dependency org="com.ibm.icu" name="icu4j" rev="49.1" transitive="false"/>
-      <dependency org="org.carrot2" name="morfologik-polish" rev="1.5.5" transitive="false"/>
-      <dependency org="org.carrot2" name="morfologik-fsa" rev="1.5.5" transitive="false"/>
-      <dependency org="org.carrot2" name="morfologik-stemming" rev="1.5.5" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-polish" rev="1.6.0" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-fsa" rev="1.6.0" transitive="false"/>
+      <dependency org="org.carrot2" name="morfologik-stemming" rev="1.6.0" transitive="false"/>
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
 </ivy-module>
diff --git a/solr/contrib/analysis-extras/src/test-files/analysis-extras/solr/collection1/conf/solrconfig-icucollate.xml b/solr/contrib/analysis-extras/src/test-files/analysis-extras/solr/collection1/conf/solrconfig-icucollate.xml
index 2ae6c7f..73004d7 100644
--- a/solr/contrib/analysis-extras/src/test-files/analysis-extras/solr/collection1/conf/solrconfig-icucollate.xml
+++ b/solr/contrib/analysis-extras/src/test-files/analysis-extras/solr/collection1/conf/solrconfig-icucollate.xml
@@ -19,6 +19,9 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 </config>
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml
index 76cd7d4..04d591c 100644
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml
+++ b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/solrconfig.xml
@@ -28,6 +28,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
   
   <!--	Enables JMX if and only if an existing MBeanServer is found, use 
diff --git a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml
index 1b263dd..d0e9844 100644
--- a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml
+++ b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-solrconfig.xml
@@ -18,6 +18,9 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <!-- Used to specify an alternate directory to hold all index data
        other than the default ./data under the Solr home.
@@ -26,8 +29,6 @@
 
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <!-- the default high-performance update handler -->
   <updateHandler class="solr.DirectUpdateHandler2">
 
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
index ccaf3a6..b3a0e9c 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
@@ -22,6 +22,8 @@
 import org.slf4j.LoggerFactory;
 
 import javax.naming.InitialContext;
+import javax.naming.NamingException;
+
 import java.sql.*;
 import java.util.*;
 import java.util.concurrent.Callable;
@@ -132,66 +134,90 @@
                 + url);
         long start = System.currentTimeMillis();
         Connection c = null;
-        try {
-          if(url != null){
+
+        if (jndiName != null) {
+          c = getFromJndi(initProps, jndiName);
+        } else if (url != null) {
+          try {
             c = DriverManager.getConnection(url, initProps);
-          } else if(jndiName != null){
-            InitialContext ctx =  new InitialContext();
-            Object jndival =  ctx.lookup(jndiName);
-            if (jndival instanceof javax.sql.DataSource) {
-              javax.sql.DataSource dataSource = (javax.sql.DataSource) jndival;
-              String user = (String) initProps.get("user");
-              String pass = (String) initProps.get("password");
-              if(user == null || user.trim().equals("")){
-                c = dataSource.getConnection();
-              } else {
-                c = dataSource.getConnection(user, pass);
-              }
-            } else {
-              throw new DataImportHandlerException(SEVERE,
-                      "the jndi name : '"+jndiName +"' is not a valid javax.sql.DataSource");
-            }
+          } catch (SQLException e) {
+            // DriverManager does not allow you to use a driver which is not loaded through
+            // the class loader of the class which is trying to make the connection.
+            // This is a workaround for cases where the user puts the driver jar in the
+            // solr.home/lib or solr.home/core/lib directories.
+            Driver d = (Driver) DocBuilder.loadClass(driver, context.getSolrCore()).newInstance();
+            c = d.connect(url, initProps);
           }
-        } catch (SQLException e) {
-          // DriverManager does not allow you to use a driver which is not loaded through
-          // the class loader of the class which is trying to make the connection.
-          // This is a workaround for cases where the user puts the driver jar in the
-          // solr.home/lib or solr.home/core/lib directories.
-          Driver d = (Driver) DocBuilder.loadClass(driver, context.getSolrCore()).newInstance();
-          c = d.connect(url, initProps);
         }
         if (c != null) {
-          if (Boolean.parseBoolean(initProps.getProperty("readOnly"))) {
-            c.setReadOnly(true);
-            // Add other sane defaults
-            c.setAutoCommit(true);
-            c.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
-            c.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
-          }
-          if (!Boolean.parseBoolean(initProps.getProperty("autoCommit"))) {
-            c.setAutoCommit(false);
-          }
-          String transactionIsolation = initProps.getProperty("transactionIsolation");
-          if ("TRANSACTION_READ_UNCOMMITTED".equals(transactionIsolation)) {
-            c.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
-          } else if ("TRANSACTION_READ_COMMITTED".equals(transactionIsolation)) {
-            c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-          } else if ("TRANSACTION_REPEATABLE_READ".equals(transactionIsolation)) {
-            c.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
-          } else if ("TRANSACTION_SERIALIZABLE".equals(transactionIsolation)) {
-            c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-          } else if ("TRANSACTION_NONE".equals(transactionIsolation)) {
-            c.setTransactionIsolation(Connection.TRANSACTION_NONE);
-          }
-          String holdability = initProps.getProperty("holdability");
-          if ("CLOSE_CURSORS_AT_COMMIT".equals(holdability)) {
-            c.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
-          } else if ("HOLD_CURSORS_OVER_COMMIT".equals(holdability)) {
-            c.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
+          try {
+            initializeConnection(c, initProps);
+          } catch (SQLException e) {
+            try {
+              c.close();
+            } catch (SQLException e2) {
+              LOG.warn("Exception closing connection during cleanup", e2);
+            }
+
+            throw new DataImportHandlerException(SEVERE, "Exception initializing SQL connection", e);
           }
         }
         LOG.info("Time taken for getConnection(): "
-                + (System.currentTimeMillis() - start));
+            + (System.currentTimeMillis() - start));
+        return c;
+      }
+
+      private void initializeConnection(Connection c, final Properties initProps)
+          throws SQLException {
+        if (Boolean.parseBoolean(initProps.getProperty("readOnly"))) {
+          c.setReadOnly(true);
+          // Add other sane defaults
+          c.setAutoCommit(true);
+          c.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
+          c.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
+        }
+        if (!Boolean.parseBoolean(initProps.getProperty("autoCommit"))) {
+          c.setAutoCommit(false);
+        }
+        String transactionIsolation = initProps.getProperty("transactionIsolation");
+        if ("TRANSACTION_READ_UNCOMMITTED".equals(transactionIsolation)) {
+          c.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
+        } else if ("TRANSACTION_READ_COMMITTED".equals(transactionIsolation)) {
+          c.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+        } else if ("TRANSACTION_REPEATABLE_READ".equals(transactionIsolation)) {
+          c.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
+        } else if ("TRANSACTION_SERIALIZABLE".equals(transactionIsolation)) {
+          c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+        } else if ("TRANSACTION_NONE".equals(transactionIsolation)) {
+          c.setTransactionIsolation(Connection.TRANSACTION_NONE);
+        }
+        String holdability = initProps.getProperty("holdability");
+        if ("CLOSE_CURSORS_AT_COMMIT".equals(holdability)) {
+          c.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
+        } else if ("HOLD_CURSORS_OVER_COMMIT".equals(holdability)) {
+          c.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
+        }
+      }
+
+      private Connection getFromJndi(final Properties initProps, final String jndiName) throws NamingException,
+          SQLException {
+
+        Connection c = null;
+        InitialContext ctx =  new InitialContext();
+        Object jndival =  ctx.lookup(jndiName);
+        if (jndival instanceof javax.sql.DataSource) {
+          javax.sql.DataSource dataSource = (javax.sql.DataSource) jndival;
+          String user = (String) initProps.get("user");
+          String pass = (String) initProps.get("password");
+          if(user == null || user.trim().equals("")){
+            c = dataSource.getConnection();
+          } else {
+            c = dataSource.getConnection(user, pass);
+          }
+        } else {
+          throw new DataImportHandlerException(SEVERE,
+                  "the jndi name : '"+jndiName +"' is not a valid javax.sql.DataSource");
+        }
         return c;
       }
     };
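
The refactoring above mostly moves code: getFromJndi keeps the original JNDI lookup, and initializeConnection keeps the original post-connect tuning, with the new close-on-failure cleanup in between. For reference, a self-contained sketch of the JNDI lookup pattern (class and method names are invented; a real run needs a container-provided JNDI context):

import java.sql.Connection;
import javax.naming.InitialContext;
import javax.sql.DataSource;

public class JndiConnectionSketch {
  public static Connection connect(String jndiName, String user, String pass)
      throws Exception {
    InitialContext ctx = new InitialContext();
    Object bound = ctx.lookup(jndiName);
    if (!(bound instanceof DataSource)) {
      // Same error case as above: only javax.sql.DataSource is accepted.
      throw new IllegalStateException(jndiName + " is not a javax.sql.DataSource");
    }
    DataSource ds = (DataSource) bound;
    // Use the no-arg overload when no user is configured, as getFromJndi does.
    return (user == null || user.trim().length() == 0)
        ? ds.getConnection()
        : ds.getConnection(user, pass);
  }
}
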
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml
index 3a8b15d..4e8a96b 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/contentstream-solrconfig.xml
@@ -18,6 +18,9 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <!-- Used to specify an alternate directory to hold all index data
        other than the default ./data under the Solr home.
@@ -26,8 +29,6 @@
 
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <!-- the default high-performance update handler -->
   <updateHandler class="solr.DirectUpdateHandler2">
 
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml
index 796f831..9fa3073 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-nodatasource-solrconfig.xml
@@ -28,6 +28,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <!-- the default high-performance update handler -->
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml
index db0a57c..c7d208a 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solrconfig.xml
@@ -18,6 +18,9 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <!-- Used to specify an alternate directory to hold all index data
        other than the default ./data under the Solr home.
@@ -26,8 +29,6 @@
 
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <!-- the default high-performance update handler -->
   <updateHandler class="solr.DirectUpdateHandler2">
 
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
index eed883c..da3c0a5 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
@@ -19,6 +19,7 @@
 import java.sql.Connection;
 import java.sql.Driver;
 import java.sql.DriverManager;
+import java.sql.SQLException;
 import java.util.*;
 
 import javax.sql.DataSource;
@@ -127,6 +128,48 @@
   }
 
   @Test
+  public void testRetrieveFromJndiFailureNotHidden() throws Exception {
+    MockInitialContextFactory.bind("java:comp/env/jdbc/JndiDB", dataSource);
+
+    props.put(JdbcDataSource.JNDI_NAME, "java:comp/env/jdbc/JndiDB");
+
+    SQLException sqlException = new SQLException("fake");
+    EasyMock.expect(dataSource.getConnection()).andThrow(sqlException);
+
+    mockControl.replay();
+    
+    try {
+      jdbcDataSource.createConnectionFactory(context, props).call();
+      fail("Expected the getConnection() SQLException to propagate");
+    } catch (SQLException ex) {
+      assertSame(sqlException, ex);
+    }
+    
+    mockControl.verify();
+  }
+  
+  @Test
+  public void testClosesConnectionWhenExceptionThrownOnSetAutocommit() throws Exception {
+    MockInitialContextFactory.bind("java:comp/env/jdbc/JndiDB", dataSource);
+
+    props.put(JdbcDataSource.JNDI_NAME, "java:comp/env/jdbc/JndiDB");
+
+    SQLException sqlException = new SQLException("fake");
+    EasyMock.expect(dataSource.getConnection()).andReturn(connection);
+    connection.setAutoCommit(false);
+    EasyMock.expectLastCall().andThrow(sqlException);
+    connection.close();
+    mockControl.replay();
+    
+    try {
+      jdbcDataSource.createConnectionFactory(context, props).call();
+      fail("Expected a DataImportHandlerException wrapping the SQLException");
+    } catch (DataImportHandlerException ex) {
+      assertSame(sqlException, ex.getCause());
+    }
+    
+    mockControl.verify();
+  }
+  
+  @Test
   public void testRetrieveFromDriverManager() throws Exception {
     DriverManager.registerDriver(driver);
     try {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
index 6f29106..45bafb4 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
@@ -60,7 +60,8 @@
     AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), getFile("dih/solr"),
         "dataimport-solrconfig.xml", "dataimport-schema.xml");
 
-    initCore("dataimport-solrconfig.xml", "dataimport-schema.xml", getFile("dih/solr").getAbsolutePath());
+    //initCore("solrconfig.xml", "schema.xml", getFile("dih/solr").getAbsolutePath());
+    createDefaultCoreContainer(getFile("dih/solr").getAbsolutePath());
   }
 
   @Before
diff --git a/solr/contrib/extraction/ivy.xml b/solr/contrib/extraction/ivy.xml
index fab6492..16dfb3c 100644
--- a/solr/contrib/extraction/ivy.xml
+++ b/solr/contrib/extraction/ivy.xml
@@ -20,8 +20,8 @@
     <info organisation="org.apache.solr" module="extraction"/>
     <dependencies>
       <!-- Tika JARs -->
-      <dependency org="org.apache.tika" name="tika-core" rev="1.3" transitive="false"/>
-      <dependency org="org.apache.tika" name="tika-parsers" rev="1.3" transitive="false"/>
+      <dependency org="org.apache.tika" name="tika-core" rev="1.4" transitive="false"/>
+      <dependency org="org.apache.tika" name="tika-parsers" rev="1.4" transitive="false"/>
       <!-- Tika dependencies - see http://tika.apache.org/1.3/gettingstarted.html#Using_Tika_as_a_Maven_dependency -->
       <!-- When upgrading Tika, upgrade dependencies versions and add any new ones
            (except slf4j-api, commons-codec, commons-logging, geronimo-stax-api_1.0_spec) -->
@@ -31,15 +31,15 @@
       <dependency org="org.apache.james" name="apache-mime4j-core" rev="0.7.2" transitive="false"/>
       <dependency org="org.apache.james" name="apache-mime4j-dom" rev="0.7.2" transitive="false"/>
       <dependency org="org.apache.commons" name="commons-compress" rev="1.4.1" transitive="false"/>
-      <dependency org="org.apache.pdfbox" name="pdfbox" rev="1.7.1" transitive="false"/>
-      <dependency org="org.apache.pdfbox" name="fontbox" rev="1.7.1" transitive="false"/>
-      <dependency org="org.apache.pdfbox" name="jempbox" rev="1.7.1" transitive="false"/>
+      <dependency org="org.apache.pdfbox" name="pdfbox" rev="1.8.1" transitive="false"/>
+      <dependency org="org.apache.pdfbox" name="fontbox" rev="1.8.1" transitive="false"/>
+      <dependency org="org.apache.pdfbox" name="jempbox" rev="1.8.1" transitive="false"/>
       <dependency org="org.bouncycastle" name="bcmail-jdk15" rev="1.45" transitive="false"/>
       <dependency org="org.bouncycastle" name="bcprov-jdk15" rev="1.45" transitive="false"/>
-      <dependency org="org.apache.poi" name="poi" rev="3.8" transitive="false"/>
-      <dependency org="org.apache.poi" name="poi-scratchpad" rev="3.8" transitive="false"/>
-      <dependency org="org.apache.poi" name="poi-ooxml" rev="3.8" transitive="false"/>
-      <dependency org="org.apache.poi" name="poi-ooxml-schemas" rev="3.8" transitive="false"/>
+      <dependency org="org.apache.poi" name="poi" rev="3.9" transitive="false"/>
+      <dependency org="org.apache.poi" name="poi-scratchpad" rev="3.9" transitive="false"/>
+      <dependency org="org.apache.poi" name="poi-ooxml" rev="3.9" transitive="false"/>
+      <dependency org="org.apache.poi" name="poi-ooxml-schemas" rev="3.9" transitive="false"/>
       <dependency org="org.apache.xmlbeans" name="xmlbeans" rev="2.3.0" transitive="false"/>
       <dependency org="dom4j" name="dom4j" rev="1.6.1" transitive="false"/>
       <dependency org="org.ccil.cowan.tagsoup" name="tagsoup" rev="1.2.1" transitive="false"/>
diff --git a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/solrconfig.xml b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/solrconfig.xml
index 9700dcd..1a7f2bb 100644
--- a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/solrconfig.xml
+++ b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/solrconfig.xml
@@ -20,6 +20,9 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
   <jmx />
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <!-- Used to specify an alternate directory to hold all index data.
        It defaults to "index" if not present, and should probably
@@ -27,8 +30,6 @@
   <dataDir>${solr.data.dir:}</dataDir>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <updateHandler class="solr.DirectUpdateHandler2">
 
     <!-- autocommit pending docs if certain criteria are met 
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangIdParams.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangIdParams.java
index 8c35bfd..fcce0d9 100644
--- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangIdParams.java
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangIdParams.java
@@ -31,6 +31,7 @@
   String THRESHOLD  = LANGUAGE_ID + ".threshold";            // Detection threshold
   String ENFORCE_SCHEMA =  LANGUAGE_ID + ".enforceSchema";   // Enforces that output fields exist in schema
   String LANG_WHITELIST  = LANGUAGE_ID + ".whitelist";       // Allowed languages
+  String LCMAP =  LANGUAGE_ID + ".lcmap";                    // Maps detected langcode to other value
   String MAP_ENABLE =  LANGUAGE_ID + ".map";                 // Turns on or off the field mapping
   String MAP_FL =  LANGUAGE_ID + ".map.fl";                  // Field list for mapping
   String MAP_OVERWRITE =  LANGUAGE_ID + ".map.overwrite";    // Whether to overwrite existing fields
diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
index 96d6d62..c998ec4 100644
--- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
+++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
@@ -75,6 +75,7 @@
   protected HashSet<String> mapIndividualFieldsSet;
   protected HashSet<String> allMapFieldsSet;
   protected HashMap<String,String> lcMap;
+  protected HashMap<String,String> mapLcMap;
   protected IndexSchema schema;
 
   // Regex patterns
@@ -138,13 +139,26 @@
         allMapFieldsSet.addAll(mapIndividualFieldsSet);
       }
 
-      // Language Code mapping
+      // Map the detected langcode to a normalized langcode (configured via langid.lcmap)
       lcMap = new HashMap<String,String>();
+      if(params.get(LCMAP) != null) {
+        for(String mapping : params.get(LCMAP).split("[, ]")) {
+          String[] keyVal = mapping.split(":");
+          if(keyVal.length == 2) {
+            lcMap.put(keyVal[0], keyVal[1]);
+          } else {
+            log.error("Unsupported format for langid.lcmap: "+mapping+". Skipping this mapping.");
+          }
+        }
+      }
+
+      // Language code mapping for the target field name (configured via langid.map.lcmap)
+      mapLcMap = new HashMap<String,String>();
       if(params.get(MAP_LCMAP) != null) {
         for(String mapping : params.get(MAP_LCMAP).split("[, ]")) {
           String[] keyVal = mapping.split(":");
           if(keyVal.length == 2) {
-            lcMap.put(keyVal[0], keyVal[1]);
+            mapLcMap.put(keyVal[0], keyVal[1]);
           } else {
             log.error("Unsupported format for langid.map.lcmap: "+mapping+". Skipping this mapping.");
           }
@@ -322,10 +336,11 @@
       langStr = fallbackLang;
     } else {
       DetectedLanguage lang = languages.get(0);
-      if(langWhitelist.isEmpty() || langWhitelist.contains(lang.getLangCode())) {
-        log.debug("Language detected {} with certainty {}", lang.getLangCode(), lang.getCertainty());
+      String normalizedLang = normalizeLangCode(lang.getLangCode());
+      if(langWhitelist.isEmpty() || langWhitelist.contains(normalizedLang)) {
+        log.debug("Language detected {} with certainty {}", normalizedLang, lang.getCertainty());
         if(lang.getCertainty() >= threshold) {
-          langStr = lang.getLangCode();
+          langStr = normalizedLang;
         } else {
           log.debug("Detected language below threshold {}, using fallback {}", threshold, fallbackLang);
           langStr = fallbackLang;
@@ -345,6 +360,20 @@
   }
 
   /**
+   * Looks up language code in map (langid.lcmap) and returns mapped value
+   * @param langCode the language code string returned from detector
+   * @return the normalized/mapped language code
+   */
+  protected String normalizeLangCode(String langCode) {
+    if (lcMap.containsKey(langCode)) {
+      String lc = lcMap.get(langCode);
+      log.debug("Doing langcode normalization mapping from "+langCode+" to "+lc);
+      return lc;
+    }
+    return langCode;
+  }
+
+  /**
    * Returns the name of the field to map the current contents into, so that they are properly analyzed.  For instance
    * if the currentField is "text" and the code is "en", the new field would by default be "text_en".
    * This method also performs custom regex pattern replace if configured. If enforceSchema=true
@@ -355,7 +384,7 @@
    * @return The new schema field name, based on pattern and replace, or null if illegal
    */
   protected String getMappedField(String currentField, String language) {
-    String lc = lcMap.containsKey(language) ? lcMap.get(language) : language;
+    String lc = mapLcMap.containsKey(language) ? mapLcMap.get(language) : language;
     String newFieldName = langPattern.matcher(mapPattern.matcher(currentField).replaceFirst(mapReplaceStr)).replaceFirst(lc);
     if(enforceSchema && schema.getFieldOrNull(newFieldName) == null) {
       log.warn("Unsuccessful field name mapping from {} to {}, field does not exist and enforceSchema=true; skipping mapping.", currentField, newFieldName);
diff --git a/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
index 6b2fdcd..c8d98fd 100644
--- a/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
+++ b/solr/contrib/langid/src/test-files/langid/solr/collection1/conf/solrconfig-languageidentifier.xml
@@ -20,6 +20,9 @@
 <config>
 
   <jmx />
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <!-- Used to specify an alternate directory to hold all index data.
        It defaults to "index" if not present, and should probably
diff --git a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
index b718384..550d10f 100644
--- a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
+++ b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java
@@ -116,6 +116,22 @@
   }
 
   @Test
+  public void testMapLangcode() throws Exception {
+    parameters = new ModifiableSolrParams();
+    parameters.add("langid.fl", "name");
+    parameters.add("langid.lcmap", "zh_cn:zh zh_tw:zh");
+    parameters.set("langid.enforceSchema", "false");
+    liProcessor = createLangIdProcessor(parameters);
+
+    assertEquals("zh", liProcessor.resolveLanguage("zh_cn", "NA"));
+    assertEquals("zh", liProcessor.resolveLanguage("zh_tw", "NA"));
+    assertEquals("no", liProcessor.resolveLanguage("no", "NA"));
+    List<DetectedLanguage> langs = new ArrayList<DetectedLanguage>();
+    langs.add(new DetectedLanguage("zh_cn", 0.8));
+    assertEquals("zh", liProcessor.resolveLanguage(langs, "NA"));
+  }
+
+  @Test
   public void testPreExisting() throws Exception {
     SolrInputDocument doc;
     parameters = new ModifiableSolrParams();
diff --git a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml
index ebd7903..9a7143f 100644
--- a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml
+++ b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/solrconfig.xml
@@ -25,6 +25,9 @@
   -->
 <config xmlns:xi="http://www.w3.org/2001/XInclude">
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
   <!--
     lib directives can be used to instruct Solr to load any Jars
     identified and use them to resolve any "plugins" specified in your
@@ -68,7 +71,6 @@
   -->
   <dataDir>${solr.data.dir:}</dataDir>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
 
   <!--
     Enables JMX if and only if an existing MBeanServer is found, use
diff --git a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml
index 76de143..c4ec6ca 100644
--- a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml
+++ b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-solrconfig.xml
@@ -25,6 +25,9 @@
   -->
 <config xmlns:xi="http://www.w3.org/2001/XInclude">
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
   <!--
     lib directives can be used to instruct Solr to load any Jars
     identified and use them to resolve any "plugins" specified in your
@@ -68,8 +71,6 @@
   -->
   <dataDir>${solr.data.dir:}</dataDir>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <!--
     Enables JMX if and only if an existing MBeanServer is found, use
     this if you want to configure JMX through JVM parameters. Remove
diff --git a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml
index 8d77ee1..0c702b1 100644
--- a/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml
+++ b/solr/contrib/velocity/src/test-files/velocity/solr/collection1/conf/solrconfig.xml
@@ -22,6 +22,9 @@
 -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
 
   <lib dir="../../contrib/velocity/lib" />
   <lib dir="../../dist/" regex="solr-velocity-\d.*\.jar" />
@@ -31,9 +34,6 @@
   <directoryFactory name="DirectoryFactory"
                     class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
 
-
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <updateHandler class="solr.DirectUpdateHandler2">
   </updateHandler>
   
diff --git a/solr/core/ivy.xml b/solr/core/ivy.xml
index 2b4ccc4..b1d974b 100644
--- a/solr/core/ivy.xml
+++ b/solr/core/ivy.xml
@@ -16,6 +16,9 @@
    specific language governing permissions and limitations
    under the License.    
 -->
+<!DOCTYPE ivy-module [
+  <!ENTITY hadoop.version "2.0.5-alpha">
+]>
 <ivy-module version="2.0">
     <info organisation="org.apache.solr" module="core"/>
 
@@ -32,6 +35,15 @@
       <dependency org="javax.servlet" name="javax.servlet-api" rev="3.0.1" transitive="false"/>
       <dependency org="org.restlet.jee" name="org.restlet" rev="2.1.1" transitive="false"/>
       <dependency org="org.restlet.jee" name="org.restlet.ext.servlet" rev="2.1.1" transitive="false"/>
-      <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
+      <dependency org="joda-time" name="joda-time" rev="2.2" transitive="false"/>
+      
+      <dependency org="org.apache.hadoop" name="hadoop-common" rev="&hadoop.version;" transitive="false"/>
+      <dependency org="org.apache.hadoop" name="hadoop-hdfs" rev="&hadoop.version;" transitive="false"/>
+      <dependency org="org.apache.hadoop" name="hadoop-annotations" rev="&hadoop.version;" transitive="false"/>
+      <dependency org="org.apache.hadoop" name="hadoop-auth" rev="&hadoop.version;" transitive="false"/>
+      <dependency org="commons-configuration" name="commons-configuration" rev="1.6" transitive="false"/>
+      <dependency org="com.google.protobuf" name="protobuf-java" rev="2.4.0a" transitive="false"/>
+      <dependency org="com.googlecode.concurrentlinkedhashmap" name="concurrentlinkedhashmap-lru" rev="1.2" transitive="false"/>
+      <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
 </ivy-module>
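
The new DOCTYPE block is a plain internal DTD entity: the Hadoop version is declared once and every &hadoop.version; reference expands to it, keeping the four hadoop-* revs in sync. Standard XML parsers perform this expansion automatically; a small JDK-only demonstration (the XML string is a toy):

import java.io.ByteArrayInputStream;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

public class EntityExpansionSketch {
  public static void main(String[] args) throws Exception {
    String xml = "<!DOCTYPE deps [<!ENTITY hadoop.version \"2.0.5-alpha\">]>"
        + "<deps><dependency rev=\"&hadoop.version;\"/></deps>";
    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
        .parse(new ByteArrayInputStream(xml.getBytes("UTF-8")));
    Element dep = (Element) doc.getDocumentElement().getFirstChild();
    System.out.println(dep.getAttribute("rev")); // 2.0.5-alpha
  }
}
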
diff --git a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
index 32478d0..d6636e8 100644
--- a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
+++ b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java
@@ -17,20 +17,24 @@
  * limitations under the License.
  */
 
-import org.apache.solr.cloud.ZkController;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.SolrCore;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.request.SolrRequestInfo;
-import org.slf4j.LoggerFactory;
-
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.WeakHashMap;
-import java.util.logging.*;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.Formatter;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.request.SolrRequestInfo;
+import org.slf4j.LoggerFactory;
 
 public class SolrLogFormatter extends Formatter {
 
@@ -259,7 +263,7 @@
 
   private Map<String,Object> getReplicaProps(ZkController zkController, SolrCore core) {
     final String collection = core.getCoreDescriptor().getCloudDescriptor().getCollectionName();
-    Replica replica = zkController.getClusterState().getReplica(collection, zkController.getCoreNodeName(core.getCoreDescriptor()));
+    Replica replica = zkController.getClusterState().getReplica(collection, core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
     if(replica!=null) {
       return replica.getProperties();
     }
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index 20cc190..5eeb19c 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -85,6 +85,7 @@
   private String shards;
 
   private String dataDir;
+  private String solrUlogDir;
   
   private volatile boolean startedBefore = false;
 
@@ -359,6 +360,9 @@
     if( dataDir != null) {
       System.setProperty("solr.data.dir", dataDir);
     }
+    if( solrUlogDir != null) {
+      System.setProperty("solr.ulog.dir", solrUlogDir);
+    }
     if(shards != null) {
       System.setProperty("shard", shards);
     }
@@ -382,6 +386,8 @@
     System.clearProperty("shard");
     System.clearProperty("solr.data.dir");
     System.clearProperty("coreNodeName");
+    System.clearProperty("solr.ulog.dir");
+
   }
 
   public void stop() throws Exception {
@@ -485,6 +491,10 @@
   public void setDataDir(String dataDir) {
     this.dataDir = dataDir;
   }
+  
+  public void setUlogDir(String ulogDir) {
+    this.solrUlogDir = ulogDir;
+  }
 
   public void setCoreNodeName(String coreNodeName) {
     this.coreNodeName = coreNodeName;
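
setUlogDir follows the same lifecycle as the existing setDataDir: the value is exported as the solr.ulog.dir system property before Jetty starts and cleared on stop so later runs in the same JVM do not inherit it. A condensed sketch of that set-then-clear pattern (the path and class name are illustrative):

public class UlogDirPropertySketch {
  public static void main(String[] args) {
    String solrUlogDir = "/tmp/solr/ulog"; // would come from setUlogDir(...)
    if (solrUlogDir != null) {
      System.setProperty("solr.ulog.dir", solrUlogDir); // before start()
    }
    try {
      System.out.println("ulog dir = " + System.getProperty("solr.ulog.dir"));
    } finally {
      System.clearProperty("solr.ulog.dir"); // mirror the stop() cleanup above
    }
  }
}
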
diff --git a/solr/core/src/java/org/apache/solr/cloud/AssignShard.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
similarity index 77%
rename from solr/core/src/java/org/apache/solr/cloud/AssignShard.java
rename to solr/core/src/java/org/apache/solr/cloud/Assign.java
index 524dfe5..8f0120b 100644
--- a/solr/core/src/java/org/apache/solr/cloud/AssignShard.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Assign.java
@@ -23,12 +23,36 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 
-public class AssignShard {
 
+public class Assign {
+  private static Pattern COUNT = Pattern.compile("core_node(\\d+)");
+
+  public static String assignNode(String collection, ClusterState state) {
+    Map<String, Slice> sliceMap = state.getSlicesMap(collection);
+    if (sliceMap == null) {
+      return "core_node1";
+    }
+
+    int max = 0;
+    for (Slice slice : sliceMap.values()) {
+      for (Replica replica : slice.getReplicas()) {
+        Matcher m = COUNT.matcher(replica.getName());
+        if (m.matches()) {
+          max = Math.max(max, Integer.parseInt(m.group(1)));
+        }
+      }
+    }
+
+    return "core_node" + (max + 1);
+  }
+  
   /**
    * Assign a new unique id up to slices count - then add replicas evenly.
    * 
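
assignNode names a new replica by scanning the existing ones for the core_node<N> pattern and returning core_node<max+1>, so names stay unique even when earlier numbers have been deleted. A toy run of that rule (the replica names are made up):

import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AssignNodeSketch {
  private static final Pattern COUNT = Pattern.compile("core_node(\\d+)");

  public static void main(String[] args) {
    List<String> existing = Arrays.asList("core_node1", "core_node5", "core_node2");
    int max = 0;
    for (String name : existing) {
      Matcher m = COUNT.matcher(name);
      if (m.matches()) {
        max = Math.max(max, Integer.parseInt(m.group(1)));
      }
    }
    // Prints core_node6: gaps (3, 4) are never reused, only the maximum matters.
    System.out.println("core_node" + (max + 1));
  }
}
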
diff --git a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
index cf086fd..8a071c7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/CloudDescriptor.java
@@ -44,6 +44,10 @@
   public boolean isLeader() {
     return isLeader;
   }
+  
+  public void setLeader(boolean isLeader) {
+    this.isLeader = isLeader;
+  }
 
   public void setShardId(String shardId) {
     this.shardId = shardId;
diff --git a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
index 1d436af..52d22f0 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ElectionContext.java
@@ -47,9 +47,9 @@
   String leaderSeqPath;
   private SolrZkClient zkClient;
   
-  public ElectionContext(final String shardZkNodeName,
+  public ElectionContext(final String coreNodeName,
       final String electionPath, final String leaderPath, final ZkNodeProps leaderProps, final SolrZkClient zkClient) {
-    this.id = shardZkNodeName;
+    this.id = coreNodeName;
     this.electionPath = electionPath;
     this.leaderPath = leaderPath;
     this.leaderProps = leaderProps;
@@ -78,8 +78,8 @@
   protected LeaderElector leaderElector;
 
   public ShardLeaderElectionContextBase(LeaderElector leaderElector, final String shardId,
-      final String collection, final String shardZkNodeName, ZkNodeProps props, ZkStateReader zkStateReader) {
-    super(shardZkNodeName, ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/leader_elect/"
+      final String collection, final String coreNodeName, ZkNodeProps props, ZkStateReader zkStateReader) {
+    super(coreNodeName, ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/leader_elect/"
         + shardId, ZkStateReader.getShardLeadersPath(collection, shardId),
         props, zkStateReader.getZkClient());
     this.leaderElector = leaderElector;
@@ -95,7 +95,7 @@
     zkClient.makePath(leaderPath, ZkStateReader.toJSON(leaderProps),
         CreateMode.EPHEMERAL, true);
     assert shardId != null;
-    ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION, "leader",
+    ZkNodeProps m = ZkNodeProps.fromKeyVals(Overseer.QUEUE_OPERATION, ZkStateReader.LEADER_PROP,
         ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP,
         collection, ZkStateReader.BASE_URL_PROP, leaderProps.getProperties()
             .get(ZkStateReader.BASE_URL_PROP), ZkStateReader.CORE_NAME_PROP,
@@ -119,8 +119,8 @@
   
   public ShardLeaderElectionContext(LeaderElector leaderElector, 
       final String shardId, final String collection,
-      final String shardZkNodeName, ZkNodeProps props, ZkController zkController, CoreContainer cc) {
-    super(leaderElector, shardId, collection, shardZkNodeName, props,
+      final String coreNodeName, ZkNodeProps props, ZkController zkController, CoreContainer cc) {
+    super(leaderElector, shardId, collection, coreNodeName, props,
         zkController.getZkStateReader());
     this.zkController = zkController;
     this.cc = cc;
@@ -138,12 +138,12 @@
   @Override
   void runLeaderProcess(boolean weAreReplacement) throws KeeperException,
       InterruptedException, IOException {
-    log.info("Running the leader process.");
+    log.info("Running the leader process for shard " + shardId);
     
     String coreName = leaderProps.getStr(ZkStateReader.CORE_NAME_PROP);
     
     // clear the leader in clusterstate
-    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "leader",
+    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, ZkStateReader.LEADER_PROP,
         ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP,
         collection);
     Overseer.getInQueue(zkClient).offer(ZkStateReader.toJSON(m));
@@ -243,8 +243,8 @@
       }
 
       log.info("I am the new leader: "
-          + ZkCoreNodeProps.getCoreUrl(leaderProps));
-      core.getCoreDescriptor().getCloudDescriptor().isLeader = true;
+          + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
+      core.getCoreDescriptor().getCloudDescriptor().setLeader(true);
     } finally {
       if (core != null) {
         core.close();
@@ -254,16 +254,17 @@
     try {
       super.runLeaderProcess(weAreReplacement);
     } catch (Throwable t) {
+      SolrException.log(log, "There was a problem trying to register as the leader", t);
+      cancelElection();
       try {
         core = cc.getCore(coreName);
         if (core == null) {
-          cancelElection();
           throw new SolrException(ErrorCode.SERVER_ERROR,
               "Fatal Error, SolrCore not found:" + coreName + " in "
                   + cc.getCoreNames());
         }
         
-        core.getCoreDescriptor().getCloudDescriptor().isLeader = false;
+        core.getCoreDescriptor().getCloudDescriptor().setLeader(false);
         
         // we could not publish ourselves as leader - rejoin election
         rejoinLeaderElection(leaderSeqPath, core);
@@ -332,7 +333,7 @@
           return;
         } else {
           if (cnt % 40 == 0) {
-            log.info("Waiting until we see more replicas up: total="
+            log.info("Waiting until we see more replicas up for shard " + shardId + ": total="
               + slices.getReplicasMap().size() + " found=" + found
               + " timeoutin=" + (timeoutAt - System.currentTimeMillis()));
           }
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index c5a9fb4..cf8d02e 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -50,15 +50,16 @@
  */
 public class Overseer {
   public static final String QUEUE_OPERATION = "operation";
+  public static final String DELETECORE = "deletecore";
   public static final String REMOVECOLLECTION = "removecollection";
   
   private static final int STATE_UPDATE_DELAY = 1500;  // delay between cloud state updates
 
+
   private static Logger log = LoggerFactory.getLogger(Overseer.class);
   
   private class ClusterStateUpdater implements Runnable, ClosableThread {
     
-    private static final String DELETECORE = "deletecore";
     private final ZkStateReader reader;
     private final SolrZkClient zkClient;
     private final String myId;
@@ -267,8 +268,14 @@
         final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
         String coreNodeName = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
         if (coreNodeName == null) {
-          // it must be the default then
-          coreNodeName = message.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + message.getStr(ZkStateReader.CORE_NAME_PROP);
+          coreNodeName = getAssignedCoreNodeName(state, message);
+          if (coreNodeName != null) {
+            log.info("node=" + coreNodeName + " is already registered");
+          } else {
+            // if coreNodeName is null, auto assign one
+            coreNodeName = Assign.assignNode(collection, state);
+          }
+          message.getProperties().put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
         }
         Integer numShards = message.getStr(ZkStateReader.NUM_SHARDS_PROP)!=null?Integer.parseInt(message.getStr(ZkStateReader.NUM_SHARDS_PROP)):null;
         log.info("Update state numShards={} message={}", numShards, message);
@@ -281,7 +288,6 @@
         // use the provided non null shardId
         String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
         if (sliceName == null) {
-          //String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
           //get shardId from ClusterState
           sliceName = getAssignedId(state, coreNodeName, message);
           if (sliceName != null) {
@@ -295,8 +301,8 @@
             numShards = state.getCollectionStates().get(collection).getSlices().size();
             log.info("Collection already exists with " + ZkStateReader.NUM_SHARDS_PROP + "=" + numShards);
           }
-          sliceName = AssignShard.assignShard(collection, state, numShards);
-          log.info("Assigning new node to shard=" + sliceName);
+          sliceName = Assign.assignShard(collection, state, numShards);
+          log.info("Assigning new node to shard shard=" + sliceName);
         }
 
         Slice slice = state.getSlice(collection, sliceName);
@@ -320,8 +326,11 @@
           }
         }
 
-        // we don't put num_shards in the clusterstate
+        // we don't put these in the clusterstate
           replicaProps.remove(ZkStateReader.NUM_SHARDS_PROP);
+          replicaProps.remove(ZkStateReader.CORE_NODE_NAME_PROP);
+          replicaProps.remove(ZkStateReader.SHARD_ID_PROP);
+          replicaProps.remove(ZkStateReader.COLLECTION_PROP);
           replicaProps.remove(QUEUE_OPERATION);
           
           // remove any props with null values
@@ -417,6 +426,26 @@
         return null;
       }
       
+      private String getAssignedCoreNodeName(ClusterState state, ZkNodeProps message) {
+        Collection<Slice> slices = state.getSlices(message.getStr(ZkStateReader.COLLECTION_PROP));
+        if (slices != null) {
+          for (Slice slice : slices) {
+            for (Replica replica : slice.getReplicas()) {
+              String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+              String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+              
+              String msgBaseUrl = message.getStr(ZkStateReader.BASE_URL_PROP);
+              String msgCore = message.getStr(ZkStateReader.CORE_NAME_PROP);
+              
+              if (baseUrl.equals(msgBaseUrl) && core.equals(msgCore)) {
+                return replica.getName();
+              }
+            }
+          }
+        }
+        return null;
+      }
+      
       private ClusterState updateSlice(ClusterState state, String collectionName, Slice slice) {
         // System.out.println("###!!!### OLD CLUSTERSTATE: " + JSONUtil.toJSON(state.getCollectionStates()));
         // System.out.println("Updating slice:" + slice);
@@ -526,10 +555,6 @@
       private ClusterState removeCore(final ClusterState clusterState, ZkNodeProps message) {
         
         String cnn = message.getStr(ZkStateReader.CORE_NODE_NAME_PROP);
-        if (cnn == null) {
-          // it must be the default then
-          cnn = message.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + message.getStr(ZkStateReader.CORE_NAME_PROP);
-        }
 
         final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
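
The replicaProps.remove calls earlier in this file keep operational message keys (numShards, core_node_name, shard, collection, operation) out of the persisted clusterstate, now that coreNodeName and shardId travel on the message itself. A toy illustration of that filtering step (the key strings mirror the ZkStateReader property names; the values are fake):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class StripControlPropsSketch {
  public static void main(String[] args) {
    Map<String, Object> replicaProps = new HashMap<String, Object>();
    replicaProps.put("core", "collection1_shard1_replica1");
    replicaProps.put("base_url", "http://127.0.0.1:8983/solr");
    replicaProps.put("numShards", "2");                // control-only
    replicaProps.put("core_node_name", "core_node1");  // control-only
    replicaProps.put("shard", "shard1");               // control-only
    replicaProps.put("collection", "collection1");     // control-only
    replicaProps.put("operation", "state");            // control-only

    for (String key : Arrays.asList("numShards", "core_node_name", "shard",
        "collection", "operation")) {
      replicaProps.remove(key);
    }
    System.out.println(replicaProps); // only core and base_url survive
  }
}
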
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index 5b83f63..1ce14fe 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -360,13 +360,17 @@
             throw new SolrException(ErrorCode.BAD_REQUEST, "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
           } else if (Slice.CONSTRUCTION.equals(oSlice.getState()))  {
             for (Replica replica : oSlice.getReplicas()) {
-              String core = replica.getStr("core");
-              log.info("Unloading core: " + core + " from node: " + replica.getNodeName());
-              ModifiableSolrParams params = new ModifiableSolrParams();
-              params.set(CoreAdminParams.ACTION, CoreAdminAction.UNLOAD.toString());
-              params.set(CoreAdminParams.CORE, core);
-              params.set(CoreAdminParams.DELETE_INDEX, "true");
-              sendShardRequest(replica.getNodeName(), params);
+              if (clusterState.liveNodesContain(replica.getNodeName())) {
+                String core = replica.getStr("core");
+                log.info("Unloading core: " + core + " from node: " + replica.getNodeName());
+                ModifiableSolrParams params = new ModifiableSolrParams();
+                params.set(CoreAdminParams.ACTION, CoreAdminAction.UNLOAD.toString());
+                params.set(CoreAdminParams.CORE, core);
+                params.set(CoreAdminParams.DELETE_INDEX, "true");
+                sendShardRequest(replica.getNodeName(), params);
+              } else  {
+                log.warn("Replica {} exists in shard {} but is not live and cannot be unloaded", replica, oSlice);
+              }
             }
           }
         }
@@ -397,13 +401,19 @@
         //params.set(ZkStateReader.NUM_SHARDS_PROP, numSlices); todo: is it necessary, we're not creating collections?
 
         sendShardRequest(nodeName, params);
+      }
 
+      collectShardResponses(results, true,
+          "SPLTSHARD failed to create subshard leaders");
+
+      for (String subShardName : subShardNames) {
         // wait for parent leader to acknowledge the sub-shard core
         log.info("Asking parent leader to wait for: " + subShardName + " to be alive on: " + nodeName);
+        String coreNodeName = waitForCoreNodeName(collection, zkStateReader.getZkClient().getBaseUrlForNodeName(nodeName), subShardName);
         CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
         cmd.setCoreName(subShardName);
         cmd.setNodeName(nodeName);
-        cmd.setCoreNodeName(nodeName + "_" + subShardName);
+        cmd.setCoreNodeName(coreNodeName);
         cmd.setState(ZkStateReader.ACTIVE);
         cmd.setCheckLive(true);
         cmd.setOnlyIfLeader(true);
@@ -411,7 +421,7 @@
       }
 
       collectShardResponses(results, true,
-          "SPLTSHARD failed to create subshard leaders or timed out waiting for them to come up");
+          "SPLTSHARD timed out waiting for subshard leaders to come up");
       
       log.info("Successfully created all sub-shards for collection "
           + collectionName + " parent shard: " + slice + " on: " + parentShardLeader);
@@ -506,12 +516,13 @@
 
           sendShardRequest(subShardNodeName, params);
 
+          String coreNodeName = waitForCoreNodeName(collection, zkStateReader.getZkClient().getBaseUrlForNodeName(subShardNodeName), shardName);
           // wait for the replicas to be seen as active on sub shard leader
           log.info("Asking sub shard leader to wait for: " + shardName + " to be alive on: " + subShardNodeName);
           CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
           cmd.setCoreName(subShardNames.get(i-1));
           cmd.setNodeName(subShardNodeName);
-          cmd.setCoreNodeName(subShardNodeName + "_" + shardName);
+          cmd.setCoreNodeName(coreNodeName);
           cmd.setState(ZkStateReader.ACTIVE);
           cmd.setCheckLive(true);
           cmd.setOnlyIfLeader(true);
@@ -545,6 +556,35 @@
       throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
     }
   }
+  
+  private String waitForCoreNodeName(DocCollection collection, String msgBaseUrl, String msgCore) {
+    int retryCount = 320;
+    while (retryCount-- > 0) {
+      Map<String,Slice> slicesMap = zkStateReader.getClusterState()
+          .getSlicesMap(collection.getName());
+      if (slicesMap != null) {
+        
+        for (Slice slice : slicesMap.values()) {
+          for (Replica replica : slice.getReplicas()) {
+            // TODO: for really large clusters, we could 'index' on this
+            
+            String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+            
+            if (baseUrl.equals(msgBaseUrl) && core.equals(msgCore)) {
+              return replica.getName();
+            }
+          }
+        }
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+    throw new SolrException(ErrorCode.SERVER_ERROR, "Could not find coreNodeName");
+  }
 
   private void collectShardResponses(NamedList results, boolean abortOnError, String msgOnError) {
     ShardResponse srsp;
@@ -552,8 +592,13 @@
       srsp = shardHandler.takeCompletedOrError();
       if (srsp != null) {
         processResponse(results, srsp);
-        if (abortOnError && srsp.getException() != null)  {
-          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, srsp.getException());
+        Throwable exception = srsp.getException();
+        if (abortOnError && exception != null)  {
+          // drain pending requests
+          while (srsp != null)  {
+            srsp = shardHandler.takeCompletedOrError();
+          }
+          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, exception);
         }
       }
     } while (srsp != null);
@@ -779,7 +824,7 @@
       throws Exception {
     String str = message.getStr(key);
     try {
-      return str == null ? def : Integer.parseInt(str);
+      return str == null ? def : Integer.valueOf(str);
     } catch (Exception ex) {
       SolrException.log(log, "Could not parse " + key, ex);
       throw ex;
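
waitForCoreNodeName above is a bounded poll: retry the clusterstate lookup up to 320 times, sleeping one second between attempts, and fail with a SERVER_ERROR if the name never appears. The same shape reduced to a generic helper (this helper is illustrative, not Solr API):

import java.util.concurrent.Callable;

public class RetryUntilFoundSketch {
  public static <T> T waitFor(Callable<T> lookup, int attempts, long sleepMs)
      throws Exception {
    while (attempts-- > 0) {
      T found = lookup.call();
      if (found != null) {
        return found; // lookup succeeded, stop polling
      }
      try {
        Thread.sleep(sleepMs);
      } catch (InterruptedException e) {
        // Restore the interrupt flag but keep polling, as the code above does.
        Thread.currentThread().interrupt();
      }
    }
    throw new IllegalStateException("value did not appear within the retry budget");
  }

  public static void main(String[] args) throws Exception {
    final long readyAt = System.currentTimeMillis() + 150;
    String name = waitFor(new Callable<String>() {
      @Override
      public String call() {
        return System.currentTimeMillis() >= readyAt ? "core_node1" : null;
      }
    }, 320, 50);
    System.out.println(name); // core_node1
  }
}
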
diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
index c10bd5e..7a0dc7c 100644
--- a/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
@@ -91,7 +91,7 @@
     zkController = cc.getZkController();
     zkStateReader = zkController.getZkStateReader();
     baseUrl = zkController.getBaseUrl();
-    coreZkNodeName = zkController.getCoreNodeName(cd);
+    coreZkNodeName = cd.getCloudDescriptor().getCoreNodeName();
   }
 
   public void setRecoveringAfterStartup(boolean recoveringAfterStartup) {
@@ -118,44 +118,40 @@
     }
   }
   
-  private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops, String baseUrl)
+  private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
       throws SolrServerException, IOException {
-   
-    String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
+
     ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
     String leaderUrl = leaderCNodeProps.getCoreUrl();
     
     log.info("Attempting to replicate from " + leaderUrl + ". core=" + coreName);
     
-    // if we are the leader, either we are trying to recover faster
-    // then our ephemeral timed out or we are the only node
-    if (!leaderBaseUrl.equals(baseUrl)) {
-      
-      // send commit
-      commitOnLeader(leaderUrl);
-      
-      // use rep handler directly, so we can do this sync rather than async
-      SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
-      if (handler instanceof LazyRequestHandlerWrapper) {
-        handler = ((LazyRequestHandlerWrapper)handler).getWrappedHandler();
-      }
-      ReplicationHandler replicationHandler = (ReplicationHandler) handler;
-      
-      if (replicationHandler == null) {
-        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
-            "Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
-      }
-      
-      ModifiableSolrParams solrParams = new ModifiableSolrParams();
-      solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
-      
-      if (isClosed()) retries = INTERRUPTED;
-      boolean success = replicationHandler.doFetch(solrParams, false);
+    // send commit
+    commitOnLeader(leaderUrl);
+    
+    // use rep handler directly, so we can do this sync rather than async
+    SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
+    if (handler instanceof LazyRequestHandlerWrapper) {
+      handler = ((LazyRequestHandlerWrapper) handler).getWrappedHandler();
+    }
+    ReplicationHandler replicationHandler = (ReplicationHandler) handler;
+    
+    if (replicationHandler == null) {
+      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+          "Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
+    }
+    
+    ModifiableSolrParams solrParams = new ModifiableSolrParams();
+    solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
+    
+    if (isClosed()) retries = INTERRUPTED;
+    boolean success = replicationHandler.doFetch(solrParams, false);
+    
+    if (!success) {
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Replication for recovery failed.");
+    }
 
-      if (!success) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
-      }
-      
       // solrcloud_debug
 //      try {
 //        RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
@@ -169,7 +165,7 @@
 //      } catch (Exception e) {
 //        
 //      }
-    }
+    
   }
 
   private void commitOnLeader(String leaderUrl) throws SolrServerException, IOException {
@@ -329,10 +325,10 @@
         String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
 
         boolean isLeader = leaderUrl.equals(ourUrl);
-        if (isLeader && !cloudDesc.isLeader) {
+        if (isLeader && !cloudDesc.isLeader()) {
           throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
         }
-        if (cloudDesc.isLeader) {
+        if (cloudDesc.isLeader()) {
           // we are now the leader - no one else must have been suitable
           log.warn("We have not yet recovered - but we are now the leader! core=" + coreName);
           log.info("Finished recovery process. core=" + coreName);
@@ -406,8 +402,7 @@
         
         try {
 
-          replicate(zkController.getNodeName(), core,
-              leaderprops, leaderUrl);
+          replicate(zkController.getNodeName(), core, leaderprops);
 
           replay(ulog);
           replayed = true;
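
Note on the change above: replicate() no longer takes the leader's base URL and no longer skips the fetch when the recovering core happens to be the leader; it always commits on the leader and then drives the ReplicationHandler synchronously. A minimal sketch of that synchronous-fetch idiom, assuming a core with the standard /replication handler registered and omitting the lazy-wrapper unwrapping for brevity (pullIndexFromLeader is an illustrative name, not part of the patch):

    import java.io.IOException;

    import org.apache.solr.common.SolrException;
    import org.apache.solr.common.SolrException.ErrorCode;
    import org.apache.solr.common.params.ModifiableSolrParams;
    import org.apache.solr.core.SolrCore;
    import org.apache.solr.handler.ReplicationHandler;
    import org.apache.solr.request.SolrRequestHandler;

    // Sketch only: pull the index from the leader in the calling thread by
    // invoking the ReplicationHandler directly rather than letting it poll.
    void pullIndexFromLeader(SolrCore core, String leaderUrl) throws IOException {
      SolrRequestHandler handler = core.getRequestHandler("/replication");
      if (!(handler instanceof ReplicationHandler)) {
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
            "no /replication handler configured");
      }
      ReplicationHandler replicationHandler = (ReplicationHandler) handler;
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(ReplicationHandler.MASTER_URL, leaderUrl); // one-shot fetch source
      if (!replicationHandler.doFetch(params, false)) {     // false = not forced
        throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
      }
    }
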
diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
index 1a063b4..12d3738 100644
--- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
+++ b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java
@@ -62,8 +62,8 @@
 
   private volatile boolean isClosed;
   
-  private final static HttpClient client;
-  static {
+  private final HttpClient client;
+  {
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000);
     params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 20);
@@ -87,6 +87,10 @@
     if (SKIP_AUTO_RECOVERY) {
       return true;
     }
+    if (isClosed) {
+      log.warn("Closed, skipping sync up.");
+      return false;
+    }
     log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));
     // TODO: look at our state usage of sync
     // zkController.publish(core, ZkStateReader.SYNC);
@@ -112,20 +116,6 @@
       log.info("We have been closed, won't sync with replicas");
       return false;
     }
-    // if no one that is up is active, we are willing to wait...
-    // we don't want a recovering node to become leader and then
-    // a better candidate pops up a second later.
-//    int tries = 20;
-//    while (!areAnyReplicasActive(zkController, collection, shardId)) {
-//      if (tries-- == 0) {
-//        break;
-//      }
-//      try {
-//        Thread.sleep(500);
-//      } catch (InterruptedException e) {
-//        Thread.currentThread().interrupt();
-//      }
-//    }
     
     // first sync ourselves - we are the potential leader after all
     try {
@@ -146,7 +136,7 @@
         syncToMe(zkController, collection, shardId, leaderProps, core.getCoreDescriptor());
         
       } else {
-        log.info("Leader's attempt to sync with shard failed, moving to the next canidate");
+        log.info("Leader's attempt to sync with shard failed, moving to the next candidate");
         // lets see who seems ahead...
       }
       
@@ -160,8 +150,7 @@
   private boolean syncWithReplicas(ZkController zkController, SolrCore core,
       ZkNodeProps props, String collection, String shardId) {
     List<ZkCoreNodeProps> nodes = zkController.getZkStateReader()
-        .getReplicaProps(collection, shardId,
-            zkController.getCoreNodeName(core.getCoreDescriptor()),
+        .getReplicaProps(collection, shardId, core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName(),
             props.getStr(ZkStateReader.CORE_NAME_PROP));
     
     if (nodes == null) {
@@ -189,7 +178,7 @@
     List<ZkCoreNodeProps> nodes = zkController
         .getZkStateReader()
         .getReplicaProps(collection, shardId,
-            zkController.getCoreNodeName(cd),
+            cd.getCloudDescriptor().getCoreNodeName(),
             leaderProps.getStr(ZkStateReader.CORE_NAME_PROP));
     if (nodes == null) {
       log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + " has no replicas");
@@ -273,6 +262,11 @@
   public void close() {
     this.isClosed = true;
     try {
+      client.getConnectionManager().shutdown();
+    } catch (Throwable e) {
+      SolrException.log(log, e);
+    }
+    try {
       ExecutorUtil.shutdownNowAndAwaitTermination(recoveryCmdExecutor);
     } catch (Throwable e) {
       SolrException.log(log, e);
@@ -280,7 +274,6 @@
   }
   
   private void requestRecovery(final ZkNodeProps leaderProps, final String baseUrl, final String coreName) throws SolrServerException, IOException {
-    // TODO: do this in background threads
     Thread thread = new Thread() {
       {
         setDaemon(true);
@@ -291,7 +284,7 @@
         recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
         recoverRequestCmd.setCoreName(coreName);
         
-        HttpSolrServer server = new HttpSolrServer(baseUrl);
+        HttpSolrServer server = new HttpSolrServer(baseUrl, client);
         server.setConnectionTimeout(45000);
         server.setSoTimeout(45000);
         try {
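
The static HttpClient in SyncStrategy becomes per-instance (note the static initializer turning into an instance initializer), and close() now shuts the client's connection manager down before stopping the recovery executor, so the requestRecovery threads share one pool and release it on shutdown. A hedged sketch of that lifecycle with the same pooling parameters; the base URL is a placeholder:

    import org.apache.http.client.HttpClient;
    import org.apache.solr.client.solrj.impl.HttpClientUtil;
    import org.apache.solr.client.solrj.impl.HttpSolrServer;
    import org.apache.solr.common.params.ModifiableSolrParams;

    // Sketch: build a pooled client, share it across HttpSolrServer
    // instances, and release the pool when the owning component closes.
    static void clientLifecycleSketch() {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000);
      params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 20);
      HttpClient client = HttpClientUtil.createClient(params);
      try {
        HttpSolrServer server = new HttpSolrServer("http://127.0.0.1:8983/solr", client);
        server.setConnectionTimeout(45000);
        server.setSoTimeout(45000);
        // ... issue requests ...
      } finally {
        client.getConnectionManager().shutdown(); // mirrors the new close()
      }
    }
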
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java b/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
index f8e171e..dad2d83 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkCLI.java
@@ -1,14 +1,5 @@
 package org.apache.solr.cloud;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import javax.xml.parsers.ParserConfigurationException;
-
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -17,18 +8,23 @@
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
-import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.cloud.OnReconnect;
 import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.core.Config;
 import org.apache.solr.core.ConfigSolr;
-import org.apache.solr.core.ConfigSolrXml;
-import org.apache.solr.core.ConfigSolrXmlOld;
 import org.apache.solr.core.SolrResourceLoader;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
 
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -49,6 +45,7 @@
 public class ZkCLI {
   
   private static final String MAKEPATH = "makepath";
+  private static final String PUT = "put";
   private static final String DOWNCONFIG = "downconfig";
   private static final String ZK_CLI_NAME = "ZkCLI";
   private static final String HELP = "help";
@@ -92,7 +89,7 @@
         .hasArg(true)
         .withDescription(
             "cmd to run: " + BOOTSTRAP + ", " + UPCONFIG + ", " + DOWNCONFIG
-                + ", " + LINKCONFIG + ", " + MAKEPATH + ", "+ LIST + ", " +CLEAR).create(CMD));
+                + ", " + LINKCONFIG + ", " + MAKEPATH + ", "+ PUT + ", "+ LIST + ", " + CLEAR).create(CMD));
 
     Option zkHostOption = new Option("z", ZKHOST, true,
         "ZooKeeper host address");
@@ -134,7 +131,8 @@
         System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + UPCONFIG + " -" + CONFDIR + " /opt/solr/collection1/conf" + " -" + CONFNAME + " myconf");
         System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + DOWNCONFIG + " -" + CONFDIR + " /opt/solr/collection1/conf" + " -" + CONFNAME + " myconf");
         System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + LINKCONFIG + " -" + COLLECTION + " collection1" + " -" + CONFNAME + " myconf");
-        System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + MAKEPATH + " /apache/solr");
+        System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + MAKEPATH + " /apache/solr/data.txt 'config data'");
+        System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + PUT + " /solr.conf 'conf data'");
         System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + CLEAR + " /solr");
         System.out.println("zkcli.sh -zkhost localhost:9983 -cmd " + LIST);
         return;
@@ -178,25 +176,7 @@
           SolrResourceLoader loader = new SolrResourceLoader(solrHome);
           solrHome = loader.getInstanceDir();
 
-          File configFile = new File(solrHome, SOLR_XML);
-          InputStream is = new FileInputStream(configFile);
-
-          ConfigSolr cfg;
-
-          try {
-            Config config = new Config(loader, null, new InputSource(is), null, false);
-            
-            boolean oldStyle = (config.getNode("solr/cores", false) != null);
-
-             if (oldStyle) {
-               cfg = new ConfigSolrXmlOld(config, null);
-             } else {
-               cfg = new ConfigSolrXml(config, null);
-             }
-          } finally {
-            IOUtils.closeQuietly(is);
-          }
-
+          ConfigSolr cfg = ConfigSolr.fromSolrHome(solrHome);
 
           if(!ZkController.checkChrootPath(zkServerAddress, true)) {
             System.out.println("A chroot was specified in zkHost but the znode doesn't exist. ");
@@ -256,6 +236,15 @@
             System.exit(1);
           }
           zkClient.makePath(arglist.get(0).toString(), true);
+        } else if (line.getOptionValue(CMD).equals(PUT)) {
+          List<ACL> acl = ZooDefs.Ids.OPEN_ACL_UNSAFE;
+          List arglist = line.getArgList();
+          if (arglist.size() != 2) {
+            System.out.println("-" + PUT + " requires two args - the path to create and the data string");
+            System.exit(1);
+          }
+          zkClient.create(arglist.get(0).toString(), arglist.get(1).toString().getBytes("UTF-8"),
+                          acl, CreateMode.PERSISTENT, true);
         }
       } finally {
         if (solrPort != null) {
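
The new put command stores a string value in a znode. A sketch of the equivalent direct SolrZkClient call, assuming an embedded ZooKeeper on localhost:9983 (the 30-second timeout is illustrative):

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;

    // Sketch of what "-cmd put /solr.conf 'conf data'" does under the hood.
    static void putSketch() throws Exception {
      SolrZkClient zkClient = new SolrZkClient("localhost:9983", 30000);
      try {
        zkClient.create("/solr.conf", "conf data".getBytes("UTF-8"),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, true);
      } finally {
        zkClient.close();
      }
    }
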
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index e8c7bca..b046e7d 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -17,28 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.net.URLEncoder;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeoutException;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.solr.client.solrj.impl.HttpSolrServer;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
@@ -74,6 +52,28 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.URLEncoder;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 /**
  * Handle ZooKeeper interactions.
  * 
@@ -132,6 +132,8 @@
   protected volatile Overseer overseer;
 
   private String leaderVoteWait;
+  
+  private boolean genericCoreNodeNames;
 
   private int clientTimeout;
 
@@ -140,11 +142,11 @@
   private UpdateShardHandler updateShardHandler;
 
   public ZkController(final CoreContainer cc, String zkServerAddress, int zkClientTimeout, int zkClientConnectTimeout, String localHost, String locaHostPort,
-      String localHostContext, String leaderVoteWait, int distribUpdateConnTimeout, int distribUpdateSoTimeout, final CurrentCoreDescriptorProvider registerOnReconnect) throws InterruptedException,
+      String localHostContext, String leaderVoteWait, boolean genericCoreNodeNames, int distribUpdateConnTimeout, int distribUpdateSoTimeout, final CurrentCoreDescriptorProvider registerOnReconnect) throws InterruptedException,
       TimeoutException, IOException {
     if (cc == null) throw new IllegalArgumentException("CoreContainer cannot be null.");
     this.cc = cc;
-
+    this.genericCoreNodeNames = genericCoreNodeNames;
     // be forgiving and strip this off leading/trailing slashes
     // this allows us to support users specifying hostContext="/" in 
     // solr.xml to indicate the root context, instead of hostContext="" 
@@ -254,9 +256,9 @@
       // before registering as live, make sure everyone is in a
       // down state
       for (CoreDescriptor descriptor : descriptors) {
-        final String coreZkNodeName = getCoreNodeName(descriptor);
+        final String coreZkNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
         try {
-          descriptor.getCloudDescriptor().isLeader = false;
+          descriptor.getCloudDescriptor().setLeader(false);
           publish(descriptor, ZkStateReader.DOWN, updateLastPublished);
         } catch (Exception e) {
           if (isClosed) {
@@ -323,7 +325,7 @@
         .getCurrentDescriptors();
     if (descriptors != null) {
       for (CoreDescriptor descriptor : descriptors) {
-        descriptor.getCloudDescriptor().isLeader = false;
+        descriptor.getCloudDescriptor().setLeader(false);
       }
     }
   }
@@ -544,7 +546,6 @@
           if (replica.getNodeName().equals(getNodeName())
               && !(replica.getStr(ZkStateReader.STATE_PROP)
                   .equals(ZkStateReader.DOWN))) {
-            assert replica.getStr(ZkStateReader.SHARD_ID_PROP) != null;
             ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
                 ZkStateReader.STATE_PROP, ZkStateReader.DOWN,
                 ZkStateReader.BASE_URL_PROP, getBaseUrl(),
@@ -555,8 +556,7 @@
                 ZkStateReader.NODE_NAME_PROP, getNodeName(),
                 ZkStateReader.SHARD_ID_PROP,
                 replica.getStr(ZkStateReader.SHARD_ID_PROP),
-                ZkStateReader.COLLECTION_PROP,
-                replica.getStr(ZkStateReader.COLLECTION_PROP),
+                ZkStateReader.COLLECTION_PROP, collectionName,
                 ZkStateReader.CORE_NODE_NAME_PROP, replica.getName());
             updatedNodes.add(replica.getStr(ZkStateReader.CORE_NAME_PROP));
             overseerJobQueue.offer(ZkStateReader.toJSON(m));
@@ -735,7 +735,8 @@
     final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
     final String collection = cloudDesc.getCollectionName();
 
-    final String coreZkNodeName = getCoreNodeName(desc);
+    final String coreZkNodeName = desc.getCloudDescriptor().getCoreNodeName();
+    assert coreZkNodeName != null : "we should have a coreNodeName by now";
     
     String shardId = cloudDesc.getShardId();
 
@@ -923,16 +924,16 @@
     props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
     props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
     
-    final String coreZkNodeName = getCoreNodeName(cd);
+    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
     ZkNodeProps ourProps = new ZkNodeProps(props);
     String collection = cd.getCloudDescriptor()
         .getCollectionName();
     
     ElectionContext context = new ShardLeaderElectionContext(leaderElector, shardId,
-        collection, coreZkNodeName, ourProps, this, cc);
+        collection, coreNodeName, ourProps, this, cc);
 
     leaderElector.setup(context);
-    electionContexts.put(coreZkNodeName, context);
+    electionContexts.put(coreNodeName, context);
     leaderElector.joinElection(context, false);
   }
 
@@ -1017,7 +1018,7 @@
 
     final CloudDescriptor cloudDesc = desc.getCloudDescriptor();
     
-    final String shardId = state.getShardId(coreNodeName);
+    final String shardId = state.getShardId(getBaseUrl(), desc.getName());
 
     if (shardId != null) {
       cloudDesc.setShardId(shardId);
@@ -1028,16 +1029,21 @@
 
   public void unregister(String coreName, CoreDescriptor cd)
       throws InterruptedException, KeeperException {
-    final String zkNodeName = getCoreNodeName(cd);
-    ElectionContext context = electionContexts.remove(zkNodeName);
+    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
+    ElectionContext context = electionContexts.remove(coreNodeName);
+    
+    assert context != null : coreNodeName;
+    
     if (context != null) {
       context.cancelElection();
     }
+    CloudDescriptor cloudDescriptor = cd.getCloudDescriptor();
     
     ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
-        "deletecore", ZkStateReader.CORE_NAME_PROP, coreName,
+        Overseer.DELETECORE, ZkStateReader.CORE_NAME_PROP, coreName,
         ZkStateReader.NODE_NAME_PROP, getNodeName(),
-        ZkStateReader.COLLECTION_PROP, cd.getCloudDescriptor().getCollectionName());
+        ZkStateReader.COLLECTION_PROP, cloudDescriptor.getCollectionName(),
+        ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
     overseerJobQueue.offer(ZkStateReader.toJSON(m));
   }
   
@@ -1206,13 +1212,60 @@
     return zkStateReader;
   }
 
-  private String doGetShardIdProcess(String coreName, CoreDescriptor descriptor) {
-    final String coreNodeName = getCoreNodeName(descriptor);
+  private void doGetShardIdAndNodeNameProcess(CoreDescriptor cd) {
+    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
+
+    if (coreNodeName != null) {
+      waitForShardId(cd);
+    } else {
+      // if no explicit coreNodeName, we want to match by base url and core name
+      waitForCoreNodeName(cd);
+      waitForShardId(cd);
+    }
+  }
+
+  private void waitForCoreNodeName(CoreDescriptor descriptor) {
+    int retryCount = 320;
+    log.info("look for our core node name");
+    while (retryCount-- > 0) {
+      Map<String,Slice> slicesMap = zkStateReader.getClusterState()
+          .getSlicesMap(descriptor.getCloudDescriptor().getCollectionName());
+      if (slicesMap != null) {
+        
+        for (Slice slice : slicesMap.values()) {
+          for (Replica replica : slice.getReplicas()) {
+            // TODO: for really large clusters, we could 'index' on this
+            
+            String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+            String core = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+            
+            String msgBaseUrl = getBaseUrl();
+            String msgCore = descriptor.getName();
+
+            if (baseUrl.equals(msgBaseUrl) && core.equals(msgCore)) {
+              descriptor.getCloudDescriptor()
+                  .setCoreNodeName(replica.getName());
+              return;
+            }
+          }
+        }
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  private void waitForShardId(CoreDescriptor cd) {
+    log.info("waiting to find shard id in clusterstate for " + cd.getName());
     int retryCount = 320;
     while (retryCount-- > 0) {
-      final String shardId = zkStateReader.getClusterState().getShardId(coreNodeName);
+      final String shardId = zkStateReader.getClusterState().getShardId(getBaseUrl(), cd.getName());
       if (shardId != null) {
-        return shardId;
+        cd.getCloudDescriptor().setShardId(shardId);
+        return;
       }
       try {
         Thread.sleep(1000);
@@ -1222,7 +1275,7 @@
     }
     
     throw new SolrException(ErrorCode.SERVER_ERROR,
-        "Could not get shard_id for core: " + coreName + " coreNodeName:" + coreNodeName);
+        "Could not get shard id for core: " + cd.getName());
   }
   
   public static void uploadToZK(SolrZkClient zkClient, File dir, String zkPath) throws IOException, KeeperException, InterruptedException {
@@ -1261,7 +1314,7 @@
   
   public String getCoreNodeName(CoreDescriptor descriptor){
     String coreNodeName = descriptor.getCloudDescriptor().getCoreNodeName();
-    if (coreNodeName == null) {
+    if (coreNodeName == null && !genericCoreNodeNames) {
       // it's the default
       return getNodeName() + "_" + descriptor.getName();
     }
@@ -1277,31 +1330,33 @@
     downloadFromZK(zkClient, ZkController.CONFIGS_ZKNODE + "/" + configName, dir);
   }
 
-  public void preRegister(CoreDescriptor cd) throws KeeperException, InterruptedException {
-
-
-    // before becoming available, make sure we are not live and active
-    // this also gets us our assigned shard id if it was not specified
-    publish(cd, ZkStateReader.DOWN, false);
-    // shardState and shardRange are for one-time use only, thereafter the actual values in the Slice should be used
-    if (Slice.CONSTRUCTION.equals(cd.getCloudDescriptor().getShardState())) {
-      cd.getCloudDescriptor().setShardState(null);
-      cd.getCloudDescriptor().setShardRange(null);
-    }
-    String coreNodeName = getCoreNodeName(cd);
+  public void preRegister(CoreDescriptor cd) {
     
+    String coreNodeName = getCoreNodeName(cd);
+
     // make sure the node name is set on the descriptor
     if (cd.getCloudDescriptor().getCoreNodeName() == null) {
       cd.getCloudDescriptor().setCoreNodeName(coreNodeName);
     }
+
+    // before becoming available, make sure we are not live and active
+    // this also gets us our assigned shard id if it was not specified
+    try {
+      publish(cd, ZkStateReader.DOWN, false);
+    } catch (KeeperException e) {
+      log.error("", e);
+      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      log.error("", e);
+      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
+    }
     
     if (cd.getCloudDescriptor().getShardId() == null && needsToBeAssignedShardId(cd, zkStateReader.getClusterState(), coreNodeName)) {
-      String shardId;
-      shardId = doGetShardIdProcess(cd.getName(), cd);
-      cd.getCloudDescriptor().setShardId(shardId);
+      doGetShardIdAndNodeNameProcess(cd);
     } else {
       // still wait till we see us in local state
-      doGetShardIdProcess(cd.getName(), cd);
+      doGetShardIdAndNodeNameProcess(cd);
     }
 
   }
@@ -1336,7 +1391,7 @@
         }
       }
     }
-    
+
     String leaderBaseUrl = leaderProps.getBaseUrl();
     String leaderCoreName = leaderProps.getCoreName();
     
@@ -1498,11 +1553,11 @@
   }
   
   /**
-   * utilitiy method fro trimming and leading and/or trailing slashes from 
+   * Utility method for trimming any leading and/or trailing slashes from 
    * it's input.  May return the empty string.  May return null if and only 
    * if the input is null.
    */
-  private static String trimLeadingAndTrailingSlashes(final String in) {
+  public static String trimLeadingAndTrailingSlashes(final String in) {
     if (null == in) return in;
     
     String out = in;
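
waitForCoreNodeName and waitForShardId above share a bounded poll-and-sleep loop against the cluster state (320 tries, one second apart). A generic sketch of that pattern; pollFor is a hypothetical helper, not part of the patch:

    import java.util.concurrent.Callable;

    // Sketch: retry a lookup until it yields a value or the retry budget is
    // exhausted, restoring the interrupt flag as the patch does.
    static <T> T pollFor(Callable<T> lookup, int retries, long sleepMs) throws Exception {
      while (retries-- > 0) {
        T value = lookup.call();
        if (value != null) return value;
        try {
          Thread.sleep(sleepMs);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // restore interrupt status
          break;
        }
      }
      return null; // callers decide whether a miss is fatal
    }
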
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
index 594d8f9..60c4131 100644
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@@ -32,6 +32,7 @@
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext.Context;
+import org.apache.lucene.store.NRTCachingDirectory;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.apache.lucene.store.NoLockFactory;
 import org.apache.lucene.store.RateLimitedDirectoryWrapper;
@@ -40,6 +41,9 @@
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.store.blockcache.BlockDirectory;
+import org.apache.solr.store.hdfs.HdfsDirectory;
+import org.apache.solr.store.hdfs.HdfsLockFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -498,6 +502,24 @@
     } else if ("single".equals(lockType)) {
       if (!(dir.getLockFactory() instanceof SingleInstanceLockFactory)) dir
           .setLockFactory(new SingleInstanceLockFactory());
+    } else if ("hdfs".equals(lockType)) {
+      Directory del = dir;
+      
+      if (dir instanceof NRTCachingDirectory) {
+        del = ((NRTCachingDirectory) del).getDelegate();
+      }
+      
+      if (del instanceof BlockDirectory) {
+        del = ((BlockDirectory) del).getDirectory();
+      }
+      
+      if (!(del instanceof HdfsDirectory)) {
+        throw new SolrException(ErrorCode.FORBIDDEN, "Directory: "
+            + del.getClass().getName()
+            + ", but hdfs lock factory can only be used with HdfsDirectory");
+      }
+
+      dir.setLockFactory(new HdfsLockFactory(((HdfsDirectory)del).getHdfsDirPath(), ((HdfsDirectory)del).getConfiguration()));
     } else if ("none".equals(lockType)) {
       // Recipe for disaster
       log.error("CONFIGURATION WARNING: locks are disabled on " + dir);
@@ -519,7 +541,7 @@
     return path;
   }
   
-  private String stripTrailingSlash(String path) {
+  protected String stripTrailingSlash(String path) {
     if (path.endsWith("/")) {
       path = path.substring(0, path.length() - 1);
     }
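
The hdfs lock-factory branch above has to unwrap Directory decorators to reach the concrete implementation before it can hand the HDFS path and configuration to HdfsLockFactory. A sketch of that unwrapping idiom using only the Lucene wrapper; BlockDirectory and HdfsDirectory are Solr classes added by the hdfs work and peel the same instanceof-and-delegate way:

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.NRTCachingDirectory;

    // Sketch: peel known wrapper directories to reach the delegate.
    static Directory unwrap(Directory dir) {
      Directory del = dir;
      if (del instanceof NRTCachingDirectory) {
        del = ((NRTCachingDirectory) del).getDelegate();
      }
      // further wrappers (e.g. BlockDirectory.getDirectory()) follow the same shape
      return del;
    }
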
diff --git a/solr/core/src/java/org/apache/solr/core/Config.java b/solr/core/src/java/org/apache/solr/core/Config.java
index 41fa3c2..af16b66 100644
--- a/solr/core/src/java/org/apache/solr/core/Config.java
+++ b/solr/core/src/java/org/apache/solr/core/Config.java
@@ -68,6 +68,7 @@
   static final XPathFactory xpathFactory = XPathFactory.newInstance();
 
   private final Document doc;
+  private final Document origDoc; // with unsubstituted properties
   private final String prefix;
   private final String name;
   private final SolrResourceLoader loader;
@@ -131,6 +132,7 @@
       db.setErrorHandler(xmllog);
       try {
         doc = db.parse(is);
+        origDoc = copyDoc(doc);
       } finally {
         // some XML parsers are broken and don't close the byte stream (but they should according to spec)
         IOUtils.closeQuietly(is.getByteStream());
@@ -140,19 +142,24 @@
       }
     } catch (ParserConfigurationException e)  {
       SolrException.log(log, "Exception during parsing file: " + name, e);
-      throw e;
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
     } catch (SAXException e)  {
       SolrException.log(log, "Exception during parsing file: " + name, e);
-      throw e;
-    } catch( SolrException e ){
-      SolrException.log(log,"Error in "+name,e);
-      throw e;
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    } catch (TransformerException e) {
+      SolrException.log(log, "Exception during parsing file: " + name, e);
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
     }
   }
   
   public Config(SolrResourceLoader loader, String name, Document doc) {
     this.prefix = null;
     this.doc = doc;
+    try {
+      this.origDoc = copyDoc(doc);
+    } catch (TransformerException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
     this.name = name;
     this.loader = loader;
   }
@@ -441,4 +448,9 @@
     
     return version;
   }
+
+  public Config getOriginalConfig() {
+    return new Config(loader, null, origDoc);
+  }
+
 }
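
Config now retains origDoc, a copy of the DOM taken before property substitution, so getOriginalConfig() can return a Config with unsubstituted values. The copyDoc helper itself is not shown in this hunk; a plausible sketch, matching the identity-transform approach of the code removed from CoreContainer later in this patch (note its Transformer/DOMSource/DOMResult imports disappearing there):

    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerException;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.dom.DOMResult;
    import javax.xml.transform.dom.DOMSource;
    import org.w3c.dom.Document;

    // Sketch: deep-copy a DOM tree with an identity transform.
    static Document copyDoc(Document doc) throws TransformerException {
      Transformer tx = TransformerFactory.newInstance().newTransformer();
      DOMResult result = new DOMResult();
      tx.transform(new DOMSource(doc), result);
      return (Document) result.getNode();
    }
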
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolr.java b/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
index 63da0ef..edea722 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
@@ -17,15 +17,8 @@
  * limitations under the License.
  */
 
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import javax.xml.xpath.XPath;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-
+import com.google.common.base.Charsets;
+import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.util.DOMUtil;
 import org.apache.solr.util.PropertiesUtil;
@@ -33,13 +26,77 @@
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpressionException;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 
 
 public abstract class ConfigSolr {
   protected static Logger log = LoggerFactory.getLogger(ConfigSolr.class);
   
   public final static String SOLR_XML_FILE = "solr.xml";
-  
+
+  public static ConfigSolr fromFile(File configFile) {
+    log.info("Loading container configuration from {}", configFile.getAbsolutePath());
+
+    String solrHome = configFile.getParent();
+    SolrResourceLoader loader = new SolrResourceLoader(solrHome);
+    InputStream inputStream = null;
+
+    try {
+      if (!configFile.exists()) {
+        log.info("{} does not exist, using default configuration", configFile.getAbsolutePath());
+        inputStream = new ByteArrayInputStream(ConfigSolrXmlOld.DEF_SOLR_XML.getBytes(Charsets.UTF_8));
+      }
+      else {
+        inputStream = new FileInputStream(configFile);
+      }
+      return fromInputStream(loader, inputStream);
+    }
+    catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Could not load SOLR configuration", e);
+    }
+    finally {
+      IOUtils.closeQuietly(inputStream);
+    }
+  }
+
+  public static ConfigSolr fromString(SolrResourceLoader loader, String xml) {
+    return fromInputStream(loader, new ByteArrayInputStream(xml.getBytes(Charsets.UTF_8)));
+  }
+
+  public static ConfigSolr fromInputStream(SolrResourceLoader loader, InputStream is) {
+    try {
+      Config config = new Config(loader, null, new InputSource(is), null, false);
+      //config.substituteProperties();
+      return fromConfig(config);
+    }
+    catch (Exception e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
+  }
+
+  public static ConfigSolr fromSolrHome(String solrHome) {
+    return fromFile(new File(solrHome, SOLR_XML_FILE));
+  }
+
+  public static ConfigSolr fromConfig(Config config) {
+    boolean oldStyle = (config.getNode("solr/cores", false) != null);
+    return oldStyle ? new ConfigSolrXmlOld(config)
+                    : new ConfigSolrXml(config, null);
+  }
+
   // Ugly for now, but we'll at least be able to centralize all of the differences between 4x and 5x.
   public static enum CfgProp {
     SOLR_ADMINHANDLER,
@@ -63,6 +120,7 @@
     SOLR_SHARDHANDLERFACTORY_SOCKETTIMEOUT,
     SOLR_SHARESCHEMA,
     SOLR_TRANSIENTCACHESIZE,
+    SOLR_GENERICCORENODENAMES,
     SOLR_ZKCLIENTTIMEOUT,
     SOLR_ZKHOST,
 
@@ -78,6 +136,11 @@
   public ConfigSolr(Config config) {
     this.config = config;
   }
+
+  // for extension & testing.
+  protected ConfigSolr() {
+
+  }
   
   public Config getConfig() {
     return config;
@@ -124,7 +187,8 @@
     Properties properties = new Properties();
     for (int i = 0; i < props.getLength(); i++) {
       Node prop = props.item(i);
-      properties.setProperty(DOMUtil.getAttr(prop, "name"), DOMUtil.getAttr(prop, "value"));
+      properties.setProperty(DOMUtil.getAttr(prop, "name"),
+          PropertiesUtil.substituteProperty(DOMUtil.getAttr(prop, "value"), null));
     }
     return properties;
   }
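
Combined with the CoreContainer rework later in this patch, these factory methods make container bootstrap a two-step construct-then-load sequence. A sketch of the resulting startup path; /var/solr is a stand-in for a real solr home:

    import org.apache.solr.core.ConfigSolr;
    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.core.SolrResourceLoader;

    public class BootstrapSketch {
      public static void main(String[] args) {
        String solrHome = "/var/solr";                      // illustrative path
        ConfigSolr cfg = ConfigSolr.fromSolrHome(solrHome); // reads solr.xml, or the default
        CoreContainer cc = new CoreContainer(new SolrResourceLoader(solrHome), cfg);
        cc.load();                                          // cores load here, not in the constructor
        cc.shutdown();
      }
    }
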
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolrXml.java b/solr/core/src/java/org/apache/solr/core/ConfigSolrXml.java
index 0c274aa..8199f75 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolrXml.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolrXml.java
@@ -17,6 +17,12 @@
  * limitations under the License.
  */
 
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.util.PropertiesUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -27,15 +33,6 @@
 import java.util.Map;
 import java.util.Properties;
 
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.xml.sax.SAXException;
-
 
 /**
  *
@@ -46,22 +43,25 @@
   private SolrCoreDiscoverer solrCoreDiscoverer = new SolrCoreDiscoverer();
   private final Map<String, CoreDescriptor> coreDescriptorMap;
 
-  public ConfigSolrXml(Config config, CoreContainer container)
-      throws ParserConfigurationException, IOException, SAXException {
+  public ConfigSolrXml(Config config, CoreContainer container) {
     super(config);
-    checkForIllegalConfig();
-    
-    fillPropMap();
-    
-    String coreRoot = get(CfgProp.SOLR_COREROOTDIRECTORY, (container == null ? config.getResourceLoader().getInstanceDir() : container.getSolrHome()));
-    coreDescriptorMap = solrCoreDiscoverer.discover(container, new File(coreRoot));
+    try {
+      checkForIllegalConfig();
+      fillPropMap();
+      config.substituteProperties();
+      String coreRoot = get(CfgProp.SOLR_COREROOTDIRECTORY, (container == null ? config.getResourceLoader().getInstanceDir() : container.getSolrHome()));
+      coreDescriptorMap = solrCoreDiscoverer.discover(container, new File(coreRoot));
+    }
+    catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
   }
   
   private void checkForIllegalConfig() throws IOException {
     
     // Do sanity checks - we don't want to find old style config
     failIfFound("solr/@coreLoadThreads");
-    failIfFound("solr/@persist");
+    failIfFound("solr/@persistent");
     failIfFound("solr/@sharedLib");
     failIfFound("solr/@zkHost");
     
@@ -77,6 +77,7 @@
     failIfFound("solr/cores/@hostContext");
     failIfFound("solr/cores/@hostPort");
     failIfFound("solr/cores/@leaderVoteWait");
+    failIfFound("solr/cores/@genericCoreNodeNames");
     failIfFound("solr/cores/@managementPath");
     failIfFound("solr/cores/@shareSchema");
     failIfFound("solr/cores/@transientCacheSize");
@@ -118,6 +119,7 @@
     propMap.put(CfgProp.SOLR_HOSTCONTEXT, doSub("solr/solrcloud/str[@name='hostContext']"));
     propMap.put(CfgProp.SOLR_HOSTPORT, doSub("solr/solrcloud/int[@name='hostPort']"));
     propMap.put(CfgProp.SOLR_LEADERVOTEWAIT, doSub("solr/solrcloud/int[@name='leaderVoteWait']"));
+    propMap.put(CfgProp.SOLR_GENERICCORENODENAMES, doSub("solr/solrcloud/bool[@name='genericCoreNodeNames']"));
     propMap.put(CfgProp.SOLR_MANAGEMENTPATH, doSub("solr/str[@name='managementPath']"));
     propMap.put(CfgProp.SOLR_SHAREDLIB, doSub("solr/str[@name='sharedLib']"));
     propMap.put(CfgProp.SOLR_SHARESCHEMA, doSub("solr/str[@name='shareSchema']"));
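
Per the xpath above, the new flag is read from the <solrcloud> section of discovery-style solr.xml. A hedged example of where it would sit; the surrounding values are illustrative:

    <solr>
      <solrcloud>
        <str name="host">${host:}</str>
        <int name="hostPort">${jetty.port:8983}</int>
        <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
      </solrcloud>
    </solr>
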
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
index 6d9a08d..40c1a87 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
@@ -17,6 +17,17 @@
  * limitations under the License.
  */
 
+import org.apache.solr.common.SolrException;
+import org.apache.solr.util.DOMUtil;
+import org.apache.solr.util.PropertiesUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.NamedNodeMap;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpressionException;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -28,20 +39,6 @@
 import java.util.Properties;
 import java.util.Set;
 
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.xpath.XPathConstants;
-import javax.xml.xpath.XPathExpressionException;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.util.DOMUtil;
-import org.apache.solr.util.PropertiesUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.NamedNodeMap;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.SAXException;
-
 
 /**
  *
@@ -51,17 +48,20 @@
 
   private NodeList coreNodes = null;
 
-  public ConfigSolrXmlOld(Config config, CoreContainer container)
-      throws ParserConfigurationException, IOException, SAXException {
-
+  public ConfigSolrXmlOld(Config config) {
     super(config);
-    checkForIllegalConfig(container);
-    
-    fillPropMap();
-    initCoreList(container);
+    try {
+      checkForIllegalConfig();
+      fillPropMap();
+      config.substituteProperties();
+      initCoreList();
+    }
+    catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
   }
   
-  private void checkForIllegalConfig(CoreContainer container) throws IOException {
+  private void checkForIllegalConfig() throws IOException {
     // Do sanity checks - we don't want to find new style
     // config
     failIfFound("solr/str[@name='adminHandler']");
@@ -73,6 +73,7 @@
     failIfFound("solr/solrcloud/str[@name='hostContext']");
     failIfFound("solr/solrcloud/int[@name='hostPort']");
     failIfFound("solr/solrcloud/int[@name='leaderVoteWait']");
+    failIfFound("solr/solrcloud/int[@name='genericCoreNodeNames']");
     failIfFound("solr/str[@name='managementPath']");
     failIfFound("solr/str[@name='sharedLib']");
     failIfFound("solr/str[@name='shareSchema']");
@@ -125,6 +126,8 @@
         config.getVal("solr/cores/@hostPort", false));
     propMap.put(CfgProp.SOLR_LEADERVOTEWAIT,
         config.getVal("solr/cores/@leaderVoteWait", false));
+    propMap.put(CfgProp.SOLR_GENERICCORENODENAMES,
+        config.getVal("solr/cores/@genericCoreNodeNames", false));
     propMap.put(CfgProp.SOLR_MANAGEMENTPATH,
         config.getVal("solr/cores/@managementPath", false));
     propMap.put(CfgProp.SOLR_SHARESCHEMA,
@@ -138,9 +141,9 @@
     propMap.put(CfgProp.SOLR_SHARDHANDLERFACTORY_NAME,
         config.getVal("solr/shardHandlerFactory/@name", false));
     propMap.put(CfgProp.SOLR_SHARDHANDLERFACTORY_CONNTIMEOUT,
-        config.getVal("solr/shardHandlerFactory/int[@connTimeout]", false));
+        config.getVal("solr/shardHandlerFactory/int[@name='connTimeout']", false));
     propMap.put(CfgProp.SOLR_SHARDHANDLERFACTORY_SOCKETTIMEOUT,
-        config.getVal("solr/shardHandlerFactory/int[@socketTimeout]", false));
+        config.getVal("solr/shardHandlerFactory/int[@name='socketTimeout']", false));
     
     // These have no counterpart in 5.0, asking, for any of these in Solr 5.0
     // will result in an error being
@@ -154,7 +157,7 @@
     
   }
 
-  private void initCoreList(CoreContainer container) throws IOException {
+  private void initCoreList() throws IOException {
     
     coreNodes = (NodeList) config.evaluate("solr/cores/core",
         XPathConstants.NODESET);
@@ -219,7 +222,7 @@
           NamedNodeMap attributes = node.getAttributes();
           for (int i = 0; i < attributes.getLength(); i++) {
             Node attribute = attributes.item(i);
-            String val = attribute.getNodeValue();
+            String val = PropertiesUtil.substituteProperty(attribute.getNodeValue(), null);
             if (CoreDescriptor.CORE_DATADIR.equals(attribute.getNodeName()) ||
                 CoreDescriptor.CORE_INSTDIR.equals(attribute.getNodeName())) {
               if (val.indexOf('$') == -1) {
@@ -257,7 +260,8 @@
         Node node = coreNodes.item(idx);
         if (coreName.equals(DOMUtil.getAttr(node, CoreDescriptor.CORE_NAME,
             null))) {
-          return DOMUtil.getAttr(node, property, defaultVal);
+          String propVal = DOMUtil.getAttr(node, property, defaultVal);
+          return PropertiesUtil.substituteProperty(propVal, null);
         }
       }
     }
@@ -286,16 +290,16 @@
   }
 
   public static final String DEF_SOLR_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
-      + "<solr persistent=\"false\">\n"
-      + "  <cores adminPath=\"/admin/cores\" defaultCoreName=\""
-      + CoreContainer.DEFAULT_DEFAULT_CORE_NAME
-      + "\""
-      + " host=\"${host:}\" hostPort=\"${hostPort:}\" hostContext=\"${hostContext:}\" zkClientTimeout=\"${zkClientTimeout:15000}\""
-      + ">\n"
-      + "    <core name=\""
-      + CoreContainer.DEFAULT_DEFAULT_CORE_NAME
-      + "\" shard=\"${shard:}\" collection=\"${collection:}\" instanceDir=\"collection1\" />\n"
-      + "  </cores>\n" + "</solr>";
+        + "<solr persistent=\"false\">\n"
+        + "  <cores adminPath=\"/admin/cores\" defaultCoreName=\""
+        + CoreContainer.DEFAULT_DEFAULT_CORE_NAME
+        + "\""
+        + " host=\"${host:}\" hostPort=\"${hostPort:}\" hostContext=\"${hostContext:}\" zkClientTimeout=\"${zkClientTimeout:15000}\""
+        + ">\n"
+        + "    <core name=\""
+        + CoreContainer.DEFAULT_DEFAULT_CORE_NAME
+        + "\" shard=\"${shard:}\" collection=\"${collection:}\" instanceDir=\"collection1\" />\n"
+        + "  </cores>\n" + "</solr>";
 
   @Override
   public void substituteProperties() {
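
Old-style core attributes and property values are now passed through PropertiesUtil.substituteProperty, so ${name:default} references resolve against system properties at read time. A small runnable sketch; the property name myDataDir is made up:

    import org.apache.solr.util.PropertiesUtil;

    public class SubstitutionSketch {
      public static void main(String[] args) {
        System.setProperty("myDataDir", "/data/solr");  // hypothetical property
        String resolved = PropertiesUtil.substituteProperty("${myDataDir:/tmp}/index", null);
        System.out.println(resolved);  // /data/solr/index; /tmp/index if the property is unset
      }
    }
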
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 1a96d21..6c2fc55 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -17,7 +17,6 @@
 
 package org.apache.solr.core;
 
-import org.apache.commons.io.IOUtils;
 import org.apache.solr.cloud.ZkController;
 import org.apache.solr.cloud.ZkSolrResourceLoader;
 import org.apache.solr.common.SolrException;
@@ -39,21 +38,10 @@
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
 import org.w3c.dom.Node;
-import org.xml.sax.InputSource;
 
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMResult;
-import javax.xml.transform.dom.DOMSource;
 import javax.xml.xpath.XPathExpressionException;
-import java.io.ByteArrayInputStream;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.InputStream;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
@@ -76,6 +64,8 @@
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import static com.google.common.base.Preconditions.checkNotNull;
+
 
 /**
  *
@@ -104,16 +94,16 @@
 
   protected CoreAdminHandler coreAdminHandler = null;
   protected CollectionsHandler collectionsHandler = null;
-  protected File configFile = null;
   protected String libDir = null;
-  protected SolrResourceLoader loader = null;
+
   protected Properties containerProperties;
   protected Map<String ,IndexSchema> indexSchemaCache;
   protected String adminHandler;
   protected boolean shareSchema;
   protected Integer zkClientTimeout;
-  protected String solrHome;
   protected String defaultCoreName = null;
+  protected int distribUpdateConnTimeout = 0;
+  protected int distribUpdateSoTimeout = 0;
 
   protected ZkContainer zkSys = new ZkContainer();
 
@@ -124,151 +114,90 @@
 
   private int coreLoadThreads;
   private CloserThread backgroundCloser = null;
-  protected volatile ConfigSolr cfg;
-  private Config origCfg;
+
+  protected final ConfigSolr cfg;
+  protected final SolrResourceLoader loader;
+  protected final String solrHome;
   
   {
     log.info("New CoreContainer " + System.identityHashCode(this));
   }
 
   /**
-   * Deprecated
-   * @deprecated use the single arg constructor with locateSolrHome()
-   * @see SolrResourceLoader#locateSolrHome
+   * Create a new CoreContainer using system properties to detect the solr home
+   * directory.  The container's cores are not loaded.
+   * @see #load()
    */
-  @Deprecated
   public CoreContainer() {
     this(SolrResourceLoader.locateSolrHome());
   }
 
   /**
-   * Initalize CoreContainer directly from the constructor
+   * Create a new CoreContainer using the given SolrResourceLoader.  The container's
+   * cores are not loaded.
+   * @param loader the SolrResourceLoader
+   * @see #load()
    */
-  public CoreContainer(String dir, File configFile) throws FileNotFoundException {
-    this(dir);
-    this.load(dir, configFile);
+  public CoreContainer(SolrResourceLoader loader) {
+    this(loader, ConfigSolr.fromSolrHome(loader.getInstanceDir()));
   }
 
   /**
-   * Minimal CoreContainer constructor.
-   * @param loader the CoreContainer resource loader
+   * Create a new CoreContainer using the given solr home directory.  The container's
+   * cores are not loaded.
+   * @param solrHome a String containing the path to the solr home directory
+   * @see #load()
    */
-  public CoreContainer(SolrResourceLoader loader) {
-    this(loader.getInstanceDir());
-    this.loader = loader;
+  public CoreContainer(String solrHome) {
+    this(new SolrResourceLoader(solrHome), ConfigSolr.fromSolrHome(solrHome));
   }
 
-  public CoreContainer(String solrHome) {
-    this.solrHome = solrHome;
+  /**
+   * Create a new CoreContainer using the given SolrResourceLoader and
+   * configuration.  The container's cores are not loaded.
+   * @param loader the SolrResourceLoader
+   * @param config a ConfigSolr representation of this container's configuration
+   * @see #load()
+   */
+  public CoreContainer(SolrResourceLoader loader, ConfigSolr config) {
+    this.loader = checkNotNull(loader);
+    this.solrHome = loader.getInstanceDir();
+    this.cfg = checkNotNull(config);
+  }
+
+  /**
+   * Create a new CoreContainer and load its cores
+   * @param solrHome the solr home directory
+   * @param configFile the file containing this container's configuration
+   * @return a loaded CoreContainer
+   */
+  public static CoreContainer createAndLoad(String solrHome, File configFile) {
+    CoreContainer cc = new CoreContainer(new SolrResourceLoader(solrHome), ConfigSolr.fromFile(configFile));
+    cc.load();
+    return cc;
   }
   
   public Properties getContainerProperties() {
     return containerProperties;
   }
 
-  // Helper class to initialize the CoreContainer
-  public static class Initializer {
-
-    // core container instantiation
-    public CoreContainer initialize() throws FileNotFoundException {
-      CoreContainer cores = null;
-      String solrHome = SolrResourceLoader.locateSolrHome();
-      // ContainerConfigFilename could  could be a properties file
-      File fconf = new File(solrHome, "solr.xml");
-
-      log.info("looking for solr config file: " + fconf.getAbsolutePath());
-      cores = new CoreContainer(solrHome);
-
-      // first we find zkhost, then we check for solr.xml in zk
-      // 1. look for zkhost from sys prop 2. look for zkhost in {solr.home}/solr.properties
-      
-      // Either we have a config file or not.
-      
-      if (fconf.exists()) {
-        cores.load(solrHome, fconf);
-      } else {
-        // Back compart support
-        log.info("no solr.xml found. using default old-style solr.xml");
-        try {
-          cores.load(solrHome, new ByteArrayInputStream(ConfigSolrXmlOld.DEF_SOLR_XML.getBytes("UTF-8")), null);
-        } catch (Exception e) {
-          throw new SolrException(ErrorCode.SERVER_ERROR,
-              "CoreContainer.Initialize failed when trying to load default solr.xml file", e);
-        }
-        cores.configFile = fconf;
-      }
-      
-      return cores;
-    }
-  }
-
-
   //-------------------------------------------------------------------
   // Initialization / Cleanup
   //-------------------------------------------------------------------
-  
-  /**
-   * Load a config file listing the available solr cores.
-   * @param dir the home directory of all resources.
-   * @param configFile the configuration file
-   */
-  public void load(String dir, File configFile) throws FileNotFoundException {
-    this.configFile = configFile;
-    InputStream in = new FileInputStream(configFile);
-    try {
-      this.load(dir, in,  configFile.getName());
-    } finally {
-      IOUtils.closeQuietly(in);
-    }
-  } 
 
   /**
-   * Load a config file listing the available solr cores.
-   * 
-   * @param dir the home directory of all resources.
-   * @param is the configuration file InputStream. May be a properties file or an xml file
+   * Load the cores defined for this CoreContainer
    */
+  public void load()  {
 
-  // Let's keep this ugly boolean out of public circulation.
-  protected void load(String dir, InputStream is, String fileName)  {
+    log.info("Loading cores into CoreContainer [instanceDir={}]", loader.getInstanceDir());
+
     ThreadPoolExecutor coreLoadExecutor = null;
-    if (null == dir) {
-      // don't rely on SolrResourceLoader(), determine explicitly first
-      dir = SolrResourceLoader.locateSolrHome();
-    }
-    log.info("Loading CoreContainer using Solr Home: '{}'", dir);
-    
-    this.loader = new SolrResourceLoader(dir);
-    solrHome = loader.getInstanceDir();
-
-    try {
-      Config config = new Config(loader, null, new InputSource(is), null, false);
-
-      // old style defines cores in solr.xml, new style disovers them by 
-      // directory structure
-      boolean oldStyle = (config.getNode("solr/cores", false) != null);
-      
-      if (oldStyle) {
-        // ConfigSolr handles keep orig values around for non solrcore level items,
-        // but this is still how original core lvl attributes are kept around
-        this.origCfg = new Config(loader, null, copyDoc(config.getDocument()));
-        
-        this.cfg = new ConfigSolrXmlOld(config, this);
-      } else {
-        this.cfg = new ConfigSolrXml(config, this);
-
-      }
-    } catch (Exception e) {
-      throw new SolrException(ErrorCode.SERVER_ERROR, "", e);
-    }
-    // Since the cores var is now initialized to null, let's set it up right
-    // now.
-    cfg.substituteProperties();
 
     // add the sharedLib to the shared resource loader before initializing cfg based plugins
     libDir = cfg.get(ConfigSolr.CfgProp.SOLR_SHAREDLIB , null);
     if (libDir != null) {
-      File f = FileUtils.resolvePath(new File(dir), libDir);
+      File f = FileUtils.resolvePath(new File(solrHome), libDir);
       log.info("loading shared library: " + f.getAbsolutePath());
       loader.addToClassLoader(libDir, null, false);
       loader.reloadLuceneSPI();
@@ -298,8 +227,8 @@
     shareSchema = cfg.getBool(ConfigSolr.CfgProp.SOLR_SHARESCHEMA, DEFAULT_SHARE_SCHEMA);
     zkClientTimeout = cfg.getInt(ConfigSolr.CfgProp.SOLR_ZKCLIENTTIMEOUT, DEFAULT_ZK_CLIENT_TIMEOUT);
     
-    int distribUpdateConnTimeout = cfg.getInt(ConfigSolr.CfgProp.SOLR_DISTRIBUPDATECONNTIMEOUT, 0);
-    int distribUpdateSoTimeout = cfg.getInt(ConfigSolr.CfgProp.SOLR_DISTRIBUPDATESOTIMEOUT, 0);
+    distribUpdateConnTimeout = cfg.getInt(ConfigSolr.CfgProp.SOLR_DISTRIBUPDATECONNTIMEOUT, 0);
+    distribUpdateSoTimeout = cfg.getInt(ConfigSolr.CfgProp.SOLR_DISTRIBUPDATESOTIMEOUT, 0);
 
     // Note: initZooKeeper will apply hardcoded default if cloud mode
     String hostPort = cfg.get(ConfigSolr.CfgProp.SOLR_HOSTPORT, null);
@@ -315,13 +244,15 @@
 
     transientCacheSize = cfg.getInt(ConfigSolr.CfgProp.SOLR_TRANSIENTCACHESIZE, Integer.MAX_VALUE);
 
+    boolean genericCoreNodeNames = cfg.getBool(ConfigSolr.CfgProp.SOLR_GENERICCORENODENAMES, false);
+
     if (shareSchema) {
       indexSchemaCache = new ConcurrentHashMap<String,IndexSchema>();
     }
 
     zkClientTimeout = Integer.parseInt(System.getProperty("zkClientTimeout",
         Integer.toString(zkClientTimeout)));
-    zkSys.initZooKeeper(this, solrHome, zkHost, zkClientTimeout, hostPort, hostContext, host, leaderVoteWait, distribUpdateConnTimeout, distribUpdateSoTimeout);
+    zkSys.initZooKeeper(this, solrHome, zkHost, zkClientTimeout, hostPort, hostContext, host, leaderVoteWait, genericCoreNodeNames, distribUpdateConnTimeout, distribUpdateSoTimeout);
     
     if (isZooKeeperAware() && coreLoadThreads <= 1) {
       throw new SolrException(ErrorCode.SERVER_ERROR,
@@ -415,7 +346,13 @@
             p.setTransient(("true".equalsIgnoreCase(opt) || "on"
                 .equalsIgnoreCase(opt)) ? true : false);
           }
-          
+
+          if (p.isTransient() || ! p.isLoadOnStartup()) {
+            // Store the descriptor for later use; this covers transient cores
+            // and non-transient cores that are not loaded at startup.
+            solrCores.putDynamicDescriptor(rawName, p);
+          }
+
           if (p.isLoadOnStartup()) { // The normal case
 
             Callable<SolrCore> task = new Callable<SolrCore>() {
@@ -423,6 +360,9 @@
               public SolrCore call() {
                 SolrCore c = null;
                 try {
+                  if (zkSys.getZkController() != null) {
+                    preRegisterInZk(p);
+                  }
                   c = create(p);
                   registerCore(p.isTransient(), name, c, false);
                 } catch (Throwable t) {
@@ -446,10 +386,6 @@
             };
             pending.add(completionService.submit(task));
 
-          } else {
-            // Store it away for later use. includes non-transient but not
-            // loaded at startup cores.
-            solrCores.putDynamicDescriptor(rawName, p);
           }
         } catch (Throwable ex) {
           SolrException.log(log, null, ex);
@@ -629,22 +565,6 @@
         name.indexOf( '\\' ) >= 0 ){
       throw new RuntimeException( "Invalid core name: "+name );
     }
-
-    if (zkSys.getZkController() != null) {
-      // this happens before we can receive requests
-      try {
-        zkSys.getZkController().preRegister(core.getCoreDescriptor());
-      } catch (KeeperException e) {
-        log.error("", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-            "", e);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        log.error("", e);
-        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-            "", e);
-      }
-    }
     
     SolrCore old = null;
 
@@ -853,7 +773,7 @@
     try {
       name = checkDefault(name);
 
-      SolrCore core = solrCores.getCore(name);
+      SolrCore core = solrCores.getCoreFromAnyList(name, false);
       if (core == null)
         throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name );
 
@@ -961,10 +881,9 @@
     name = checkDefault(name);
 
     // Do this in two phases since we don't want to lock access to the cores over a load.
-    SolrCore core = solrCores.getCoreFromAnyList(name);
+    SolrCore core = solrCores.getCoreFromAnyList(name, true);
 
     if (core != null) {
-      core.open();
       return core;
     }
 
@@ -991,6 +910,9 @@
                                  // the wait as a consequence of shutting down.
     try {
       if (core == null) {
+        if (zkSys.getZkController() != null) {
+          preRegisterInZk(desc);
+        }
         core = create(desc); // This should throw an error if it fails.
         core.open();
         registerCore(desc.isTransient(), name, core, false);
@@ -1082,7 +1004,7 @@
   }
   
   public File getConfigFile() {
-    return configFile;
+    return new File(solrHome, ConfigSolr.SOLR_XML_FILE);
   }
 
   /**
@@ -1096,7 +1018,7 @@
   /** Persists the cores config file in cores.xml. */
   @Deprecated
   public void persist() {
-    persistFile(configFile);
+    persistFile(getConfigFile());
   }
 
   /**
@@ -1117,45 +1039,82 @@
     // only the old solrxml persists
     if (cfg != null && !(cfg instanceof ConfigSolrXmlOld)) return;
 
-    log.info("Persisting cores config to " + (file == null ? configFile : file));
+    log.info("Persisting cores config to " + (file == null ? getConfigFile() : file));
 
-    
     // <solr attrib="value">
     Map<String,String> rootSolrAttribs = new HashMap<String,String>();
-    if (libDir != null) rootSolrAttribs.put("sharedLib", libDir);
-    rootSolrAttribs.put("persistent", Boolean.toString(isPersistent()));
-    
+
+    addAttrib(rootSolrAttribs, ConfigSolr.CfgProp.SOLR_SHAREDLIB, "sharedLib", this.libDir);
+    addAttrib(rootSolrAttribs, ConfigSolr.CfgProp.SOLR_PERSISTENT, "persistent",
+        Boolean.toString(isPersistent()), "false");
+    addAttrib(rootSolrAttribs, ConfigSolr.CfgProp.SOLR_CORELOADTHREADS, "coreLoadThreads",
+        Integer.toString(this.coreLoadThreads), Integer.toString(CORE_LOAD_THREADS));
+    addAttrib(rootSolrAttribs, ConfigSolr.CfgProp.SOLR_ZKHOST, "zkHost", this.zkHost);
+
     // <solr attrib="value"> <cores attrib="value">
     Map<String,String> coresAttribs = new HashMap<String,String>();
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ADMINPATH, "adminPath", this.adminPath, null);
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ADMINHANDLER, "adminHandler", this.adminHandler, null);
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_SHARESCHEMA,"shareSchema",
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ADMINPATH, "adminPath", this.adminPath, this.getAdminPath());
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ADMINHANDLER, "adminHandler", this.adminHandler);
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_SHARESCHEMA, "shareSchema",
         Boolean.toString(this.shareSchema),
         Boolean.toString(DEFAULT_SHARE_SCHEMA));
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOST, "host", zkSys.getHost(), null);
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOST, "host", zkSys.getHost());
 
     if (! (null == defaultCoreName || defaultCoreName.equals("")) ) {
       coresAttribs.put("defaultCoreName", defaultCoreName);
     }
 
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTPORT, "hostPort",zkSys.getHostPort(), null);
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ZKCLIENTTIMEOUT, "zkClientTimeout",
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTPORT, "hostPort", zkSys.getHostPort());
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ZKCLIENTTIMEOUT, "zkClientTimeout",
         intToString(this.zkClientTimeout),
         Integer.toString(DEFAULT_ZK_CLIENT_TIMEOUT));
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTCONTEXT, "hostContext",
-        zkSys.getHostContext(), null);
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_LEADERVOTEWAIT, "leaderVoteWait",
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTCONTEXT, "hostContext",
+        zkSys.getHostContext());
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_LEADERVOTEWAIT, "leaderVoteWait",
         zkSys.getLeaderVoteWait(), LEADER_VOTE_WAIT);
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_CORELOADTHREADS, "coreLoadThreads",
-        Integer.toString(this.coreLoadThreads), Integer.toString(CORE_LOAD_THREADS));
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_GENERICCORENODENAMES, "genericCoreNodeNames",
+        Boolean.toString(zkSys.getGenericCoreNodeNames()), "false");
     if (transientCacheSize != Integer.MAX_VALUE) { // This test
     // is a consequence of testing. I really hate it.
-      addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_TRANSIENTCACHESIZE, "transientCacheSize",
+      addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_TRANSIENTCACHESIZE, "transientCacheSize",
           Integer.toString(this.transientCacheSize), Integer.toString(Integer.MAX_VALUE));
     }
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_DISTRIBUPDATECONNTIMEOUT, "distribUpdateConnTimeout",
+        Integer.toString(this.distribUpdateConnTimeout), Integer.toString(this.distribUpdateConnTimeout));
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_DISTRIBUPDATESOTIMEOUT, "distribUpdateSoTimeout",
+        Integer.toString(this.distribUpdateSoTimeout), Integer.toString(this.distribUpdateSoTimeout));
+    addAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_MANAGEMENTPATH, "managementPath",
+        this.managementPath);
+
+    // don't forget the logging stuff
+    Map<String, String> loggingAttribs = new HashMap<String, String>();
+    addAttrib(loggingAttribs, ConfigSolr.CfgProp.SOLR_LOGGING_CLASS, "class",
+        cfg.get(ConfigSolr.CfgProp.SOLR_LOGGING_CLASS, null));
+    addAttrib(loggingAttribs, ConfigSolr.CfgProp.SOLR_LOGGING_ENABLED, "enabled",
+        cfg.get(ConfigSolr.CfgProp.SOLR_LOGGING_ENABLED, null));
+
+    Map<String, String> watcherAttribs = new HashMap<String, String>();
+    addAttrib(watcherAttribs, ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_SIZE, "size",
+        cfg.get(ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_SIZE, null));
+    addAttrib(watcherAttribs, ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_THRESHOLD, "threshold",
+        cfg.get(ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_THRESHOLD, null));
+
+
+    Map<String, String> shardHandlerAttrib = new HashMap<String, String>();
+    addAttrib(shardHandlerAttrib, ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_CLASS, "class",
+        cfg.get(ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_CLASS, null));
+    addAttrib(shardHandlerAttrib, ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_NAME, "name",
+        cfg.get(ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_NAME, null));
+
+    Map<String, String> shardHandlerProps = new HashMap<String, String>();
+    addAttrib(shardHandlerProps, ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_CONNTIMEOUT, "connTimeout",
+        cfg.get(ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_CONNTIMEOUT, null));
+    addAttrib(shardHandlerProps, ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_SOCKETTIMEOUT, "socketTimeout",
+        cfg.get(ConfigSolr.CfgProp.SOLR_SHARDHANDLERFACTORY_SOCKETTIMEOUT, null));
 
     try {
-      solrCores.persistCores(origCfg, containerProperties, rootSolrAttribs, coresAttribs, file, configFile, loader);
+      solrCores.persistCores(cfg.config.getOriginalConfig(), containerProperties, rootSolrAttribs,coresAttribs,
+          loggingAttribs, watcherAttribs, shardHandlerAttrib, shardHandlerProps, file, loader);
     } catch (XPathExpressionException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
     }
@@ -1166,30 +1125,39 @@
     return Integer.toString(integer);
   }
 
-  private void addCoresAttrib(Map<String,String> coresAttribs, ConfigSolr.CfgProp prop,
-                              String attribName, String attribValue, String defaultValue) {
+  private void addAttrib(Map<String, String> attribs, ConfigSolr.CfgProp prop,
+                         String attribName, String attribValue) {
+    addAttrib(attribs, prop, attribName, attribValue, null);
+  }
+
+  private void addAttrib(Map<String, String> attribs, ConfigSolr.CfgProp prop,
+                         String attribName, String attribValue, String defaultValue) {
     if (cfg == null) {
-      coresAttribs.put(attribName, attribValue);
+      attribs.put(attribName, attribValue);
       return;
     }
-    
+
     if (attribValue != null) {
       String origValue = cfg.getOrigProp(prop, null);
-      
+
       if (origValue == null && defaultValue != null && attribValue.equals(defaultValue)) return;
 
       if (attribValue.equals(PropertiesUtil.substituteProperty(origValue, loader.getCoreProperties()))) {
-        coresAttribs.put(attribName, origValue);
+        attribs.put(attribName, origValue);
       } else {
-        coresAttribs.put(attribName, attribValue);
+        attribs.put(attribName, attribValue);
       }
     }
   }
 
+  public void preRegisterInZk(final CoreDescriptor p) {
+    zkSys.getZkController().preRegister(p);
+  }
+
   public String getSolrHome() {
     return solrHome;
   }
-  
+
   public boolean isZooKeeperAware() {
     return zkSys.getZkController() != null;
   }
@@ -1221,14 +1189,7 @@
     return solrCores.getCoreToOrigName(core);
   }
   
-  private Document copyDoc(Document document) throws TransformerException {
-    TransformerFactory tfactory = TransformerFactory.newInstance();
-    Transformer tx   = tfactory.newTransformer();
-    DOMSource source = new DOMSource(document);
-    DOMResult result = new DOMResult();
-    tx.transform(source,result);
-    return (Document)result.getNode();
-  }
+
 }
 
 class CloserThread extends Thread {
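The CoreContainer changes above do three related things: descriptors for transient or non-startup cores are now stashed via putDynamicDescriptor before the load decision rather than in an else branch, ZooKeeper pre-registration moves out of registerCore and into the load/create paths via the new preRegisterInZk, and addCoresAttrib is generalized into addAttrib so one default-suppression rule serves the root solr, cores, logging, watcher, and shardHandler attribute maps. Below is a minimal sketch of that suppression rule, simplified to ignore property substitution; the harness and values are hypothetical, not Solr API.

import java.util.HashMap;
import java.util.Map;

class AddAttribSketch {
  // Keep an attribute only if it appeared in the original config or differs
  // from its compiled-in default, so the persisted solr.xml stays minimal.
  static void addAttrib(Map<String, String> attribs, String origValue,
                        String name, String value, String defaultValue) {
    if (value == null) return;
    if (origValue == null && defaultValue != null && value.equals(defaultValue)) {
      return; // never configured and still at the default: skip it
    }
    attribs.put(name, origValue != null ? origValue : value);
  }

  public static void main(String[] args) {
    Map<String, String> attribs = new HashMap<String, String>();
    addAttrib(attribs, null, "persistent", "false", "false"); // suppressed
    addAttrib(attribs, null, "zkHost", "zk1:2181", null);     // kept
    System.out.println(attribs); // {zkHost=zk1:2181}
  }
}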
diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
index a44abe7..3e49071 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
@@ -65,6 +65,9 @@
   // them individually.
   private Properties coreProperties = new Properties();
 
+  //TODO: 5.0 remove this; it is solely a hack for persistence.
+  private Properties createdProperties = new Properties();
+
   private boolean loadedImplicit = false;
 
   private final CoreContainer coreContainer;
@@ -280,6 +283,14 @@
     }
   }
 
+  public void addCreatedProperty(String key, String value) {
+    createdProperties.put(key, value);
+  }
+
+  public final Properties getCreatedProperties() {
+    return createdProperties;
+  }
+
   public CloudDescriptor getCloudDescriptor() {
     return cloudDesc;
   }
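The createdProperties addition gives persistence access to exactly what the caller supplied when the core was created, kept apart from the merged runtime properties; the TODO flags it as a stopgap slated for removal in 5.0. A standalone sketch of the mechanism (an illustration, not Solr's class):

import java.util.Properties;

class DescriptorSketch {
  // Raw key/values captured at creation time, separate from the merged
  // runtime properties, so persistence can round-trip them verbatim.
  private final Properties createdProperties = new Properties();

  void addCreatedProperty(String key, String value) {
    createdProperties.put(key, value);
  }

  Properties getCreatedProperties() {
    return createdProperties;
  }
}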
diff --git a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
index a5b329c..2c1aa57 100644
--- a/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/DirectoryFactory.java
@@ -167,7 +167,6 @@
    */
   public abstract void release(Directory directory) throws IOException;
   
-  
   /**
    * Normalize a given path.
    * 
@@ -229,5 +228,21 @@
     }
     return isSuccess;
   }
-  
+
+  /**
+   * If your implementation can count on delete-on-last-close semantics,
+   * or throws an exception when trying to remove a file in use, return
+   * false (e.g. NFS). Otherwise, return true. Defaults to returning false.
+   *
+   * @return true if the factory impl requires that Searchers explicitly
+   * reserve commit points.
+   */
+  public boolean searchersReserveCommitPoints() {
+    return false;
+  }
+
+  public String getDataHome(CoreDescriptor cd) throws IOException {
+    // by default, we go off the instance directory
+    return normalize(SolrResourceLoader.normalizeDir(cd.getInstanceDir()) + cd.getDataDir());
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
index 904db07..f7f5809 100644
--- a/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/EphemeralDirectoryFactory.java
@@ -51,4 +51,16 @@
   public boolean isAbsolute(String path) {
     return true;
   }
+  
+  
+  @Override
+  public void remove(Directory dir) throws IOException {
+    // ram dir does not persist its dir anywhere
+  }
+  
+  @Override
+  public void remove(String path) throws IOException {
+    // ram dir does not persist its dir anywhere
+  }
+
 }
diff --git a/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
new file mode 100644
index 0000000..341c811
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/core/HdfsDirectoryFactory.java
@@ -0,0 +1,288 @@
+package org.apache.solr.core;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URLEncoder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.NRTCachingDirectory;
+import org.apache.solr.cloud.ZkController;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.store.blockcache.BlockCache;
+import org.apache.solr.store.blockcache.BlockDirectory;
+import org.apache.solr.store.blockcache.BlockDirectoryCache;
+import org.apache.solr.store.blockcache.BufferStore;
+import org.apache.solr.store.blockcache.Cache;
+import org.apache.solr.store.blockcache.Metrics;
+import org.apache.solr.store.hdfs.HdfsDirectory;
+import org.apache.solr.util.HdfsUtil;
+import org.apache.solr.util.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsDirectoryFactory extends CachingDirectoryFactory {
+  public static Logger LOG = LoggerFactory
+      .getLogger(HdfsDirectoryFactory.class);
+  
+  public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count";
+  public static final String BLOCKCACHE_DIRECT_MEMORY_ALLOCATION = "solr.hdfs.blockcache.direct.memory.allocation";
+  public static final String BLOCKCACHE_ENABLED = "solr.hdfs.blockcache.enabled";
+  public static final String BLOCKCACHE_READ_ENABLED = "solr.hdfs.blockcache.read.enabled";
+  public static final String BLOCKCACHE_WRITE_ENABLED = "solr.hdfs.blockcache.write.enabled";
+  
+  public static final String NRTCACHINGDIRECTORY_ENABLE = "solr.hdfs.nrtcachingdirectory.enable";
+  public static final String NRTCACHINGDIRECTORY_MAXMERGESIZEMB = "solr.hdfs.nrtcachingdirectory.maxmergesizemb";
+  public static final String NRTCACHINGDIRECTORY_MAXCACHEMB = "solr.hdfs.nrtcachingdirectory.maxcachedmb";
+  public static final String NUMBEROFBLOCKSPERBANK = "solr.hdfs.blockcache.blocksperbank";
+  
+  public static final String KERBEROS_ENABLED = "solr.hdfs.security.kerberos.enabled";
+  public static final String KERBEROS_KEYTAB = "solr.hdfs.security.kerberos.keytabfile";
+  public static final String KERBEROS_PRINCIPAL = "solr.hdfs.security.kerberos.principal";
+  
+  public static final String HDFS_HOME = "solr.hdfs.home";
+  
+  public static final String CONFIG_DIRECTORY = "solr.hdfs.confdir";
+  
+  private SolrParams params;
+  
+  private String hdfsDataDir;
+  
+  private String confDir;
+  
+  public static Metrics metrics;
+  private static Boolean kerberosInit;
+  
+  @Override
+  public void init(NamedList args) {
+    params = SolrParams.toSolrParams(args);
+    this.hdfsDataDir = params.get(HDFS_HOME);
+    if (this.hdfsDataDir != null && this.hdfsDataDir.length() == 0) {
+      this.hdfsDataDir = null;
+    }
+    boolean kerberosEnabled = params.getBool(KERBEROS_ENABLED, false);
+    LOG.info("Solr Kerberos Authentication "
+        + (kerberosEnabled ? "enabled" : "disabled"));
+    if (kerberosEnabled) {
+      initKerberos();
+    }
+  }
+  
+  @Override
+  protected Directory create(String path, DirContext dirContext)
+      throws IOException {
+    LOG.info("creating directory factory for path {}", path);
+    Configuration conf = getConf();
+    
+    if (metrics == null) {
+      metrics = new Metrics(conf);
+    }
+    
+    boolean blockCacheEnabled = params.getBool(BLOCKCACHE_ENABLED, true);
+    boolean blockCacheReadEnabled = params.getBool(BLOCKCACHE_READ_ENABLED,
+        true);
+    boolean blockCacheWriteEnabled = params.getBool(BLOCKCACHE_WRITE_ENABLED, true);
+    Directory dir = null;
+    
+    if (blockCacheEnabled && dirContext != DirContext.META_DATA) {
+      int numberOfBlocksPerBank = params.getInt(NUMBEROFBLOCKSPERBANK, 16384);
+      
+      int blockSize = BlockDirectory.BLOCK_SIZE;
+      
+      int bankCount = params.getInt(BLOCKCACHE_SLAB_COUNT, 1);
+      
+      boolean directAllocation = params.getBool(
+          BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
+      
+      BlockCache blockCache;
+      
+      int slabSize = numberOfBlocksPerBank * blockSize;
+      LOG.info(
+          "Number of slabs of block cache [{}] with direct memory allocation set to [{}]",
+          bankCount, directAllocation);
+      LOG.info(
+          "Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes",
+          new Object[] {slabSize, bankCount,
+              ((long) bankCount * (long) slabSize)});
+      
+      int _1024Size = params.getInt("solr.hdfs.blockcache.bufferstore.1024",
+          8192);
+      int _8192Size = params.getInt("solr.hdfs.blockcache.bufferstore.8192",
+          8192);
+      
+      BufferStore.init(_1024Size, _8192Size, metrics);
+      long totalMemory = (long) bankCount * (long) numberOfBlocksPerBank
+          * (long) blockSize;
+      try {
+        blockCache = new BlockCache(metrics, directAllocation, totalMemory,
+            slabSize, blockSize);
+      } catch (OutOfMemoryError e) {
+        throw new RuntimeException(
+            "The max direct memory is likely too low.  Either increase it (by adding -XX:MaxDirectMemorySize=<size>g -XX:+UseLargePages to your containers startup args)"
+                + " or disable direct allocation using solr.hdfs.blockcache.direct.memory.allocation=false in solrconfig.xml. If you are putting the block cache on the heap,"
+                + " your java heap size might not be large enough."
+                + " Failed allocating ~" + totalMemory / 1000000.0 + " MB.", e);
+      }
+      Cache cache = new BlockDirectoryCache(blockCache, metrics);
+      HdfsDirectory hdfsDirectory = new HdfsDirectory(new Path(path), conf);
+      dir = new BlockDirectory("solrcore", hdfsDirectory, cache, null,
+          blockCacheReadEnabled, blockCacheWriteEnabled);
+    } else {
+      dir = new HdfsDirectory(new Path(path), conf);
+    }
+    
+    boolean nrtCachingDirectory = params.getBool(NRTCACHINGDIRECTORY_ENABLE, true);
+    if (nrtCachingDirectory) {
+      double nrtCacheMaxMergeSizeMB = params.getInt(
+          NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 16);
+      double nrtCacheMaxCacheMB = params.getInt(NRTCACHINGDIRECTORY_MAXCACHEMB,
+          192);
+      
+      return new NRTCachingDirectory(dir, nrtCacheMaxMergeSizeMB,
+          nrtCacheMaxCacheMB);
+    }
+    return dir;
+  }
+  
+  @Override
+  public boolean exists(String path) {
+    Path hdfsDirPath = new Path(path);
+    Configuration conf = getConf();
+    FileSystem fileSystem = null;
+    try {
+      fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), conf);
+      return fileSystem.exists(hdfsDirPath);
+    } catch (IOException e) {
+      LOG.error("Error checking if hdfs path exists", e);
+      throw new RuntimeException("Error checking if hdfs path exists", e);
+    } finally {
+      IOUtils.closeQuietly(fileSystem);
+    }
+  }
+  
+  private Configuration getConf() {
+    Configuration conf = new Configuration();
+    confDir = params.get(CONFIG_DIRECTORY, null);
+    HdfsUtil.addHdfsResources(conf, confDir);
+    return conf;
+  }
+  
+  protected synchronized void removeDirectory(CacheValue cacheValue)
+      throws IOException {
+    Configuration conf = getConf();
+    FileSystem fileSystem = null;
+    try {
+      fileSystem = FileSystem.newInstance(new URI(cacheValue.path), conf);
+      boolean success = fileSystem.delete(new Path(cacheValue.path), true);
+      if (!success) {
+        throw new RuntimeException("Could not remove directory");
+      }
+    } catch (Exception e) {
+      LOG.error("Could not remove directory", e);
+      throw new SolrException(ErrorCode.SERVER_ERROR,
+          "Could not remove directory", e);
+    } finally {
+      IOUtils.closeQuietly(fileSystem);
+    }
+  }
+  
+  @Override
+  public boolean isAbsolute(String path) {
+    return path.startsWith("hdfs:/");
+  }
+  
+  @Override
+  public boolean isPersistent() {
+    return true;
+  }
+  
+  @Override
+  public boolean searchersReserveCommitPoints() {
+    return true;
+  }
+  
+  @Override
+  public String getDataHome(CoreDescriptor cd) throws IOException {
+    if (hdfsDataDir == null) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "You must set the "
+          + this.getClass().getSimpleName() + " param " + HDFS_HOME
+          + " for relative dataDir paths to work");
+    }
+    
+    // by default, we go off the instance directory
+    String path;
+    if (cd.getCloudDescriptor() != null) {
+      path = URLEncoder.encode(cd.getCloudDescriptor().getCollectionName(),
+          "UTF-8")
+          + "/"
+          + URLEncoder.encode(cd.getCloudDescriptor().getCoreNodeName(),
+              "UTF-8");
+    } else {
+      path = cd.getName();
+    }
+    
+    return normalize(SolrResourceLoader.normalizeDir(ZkController
+        .trimLeadingAndTrailingSlashes(hdfsDataDir)
+        + "/"
+        + path
+        + "/"
+        + cd.getDataDir()));
+  }
+  
+  public String getConfDir() {
+    return confDir;
+  }
+  
+  private void initKerberos() {
+    String keytabFile = params.get(KERBEROS_KEYTAB, "").trim();
+    if (keytabFile.length() == 0) {
+      throw new IllegalArgumentException(KERBEROS_KEYTAB + " is required because "
+          + KERBEROS_ENABLED + " is set to true");
+    }
+    String principal = params.get(KERBEROS_PRINCIPAL, "");
+    if (principal.length() == 0) {
+      throw new IllegalArgumentException(KERBEROS_PRINCIPAL
+          + " is required because " + KERBEROS_ENABLED + " is set to true");
+    }
+    synchronized (HdfsDirectoryFactory.class) {
+      if (kerberosInit == null) {
+        kerberosInit = Boolean.TRUE;
+        Configuration conf = new Configuration();
+        conf.set("hadoop.security.authentication", "kerberos");
+        UserGroupInformation.setConfiguration(conf);
+        LOG.info(
+            "Attempting to acquire kerberos ticket with keytab: {}, principal: {} ",
+            keytabFile, principal);
+        try {
+          UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
+        } catch (IOException ioe) {
+          throw new RuntimeException(ioe);
+        }
+        LOG.info("Got Kerberos ticket");
+      }
+    }
+  }
+}
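For sizing the block cache above, total off-heap use is bankCount x numberOfBlocksPerBank x blockSize. A worked example with this factory's defaults, assuming BlockDirectory.BLOCK_SIZE is 8 KB (an assumption; check the constant in your build):

public class BlockCacheSizing {
  public static void main(String[] args) {
    int numberOfBlocksPerBank = 16384; // solr.hdfs.blockcache.blocksperbank default
    int blockSize = 8192;              // assumed BlockDirectory.BLOCK_SIZE
    int bankCount = 1;                 // solr.hdfs.blockcache.slab.count default

    long slabSize = (long) numberOfBlocksPerBank * blockSize;
    long totalMemory = (long) bankCount * slabSize;
    // 16384 * 8192 = 134,217,728 bytes: one 128 MB slab of direct memory.
    System.out.println(slabSize + " bytes per slab, " + totalMemory + " bytes total");
    // With direct allocation enabled (the default), -XX:MaxDirectMemorySize
    // must cover totalMemory or BlockCache construction fails as above.
  }
}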
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
index 21de0db..11c6793 100644
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@@ -100,7 +100,7 @@
     }
   }
 
-  private List<IndexCommitWrapper> wrap(List<IndexCommit> list) {
+  private List<IndexCommitWrapper> wrap(List<? extends IndexCommit> list) {
     List<IndexCommitWrapper> result = new ArrayList<IndexCommitWrapper>();
     for (IndexCommit indexCommit : list) result.add(new IndexCommitWrapper(indexCommit));
     return result;
@@ -130,7 +130,7 @@
    * Internal use for Lucene... do not explicitly call.
    */
   @Override
-  public void onInit(List list) throws IOException {
+  public void onInit(List<? extends IndexCommit> list) throws IOException {
     List<IndexCommitWrapper> wrapperList = wrap(list);
     deletionPolicy.onInit(wrapperList);
     updateCommitPoints(wrapperList);
@@ -141,7 +141,7 @@
    * Internal use for Lucene... do not explicitly call.
    */
   @Override
-  public void onCommit(List list) throws IOException {
+  public void onCommit(List<? extends IndexCommit> list) throws IOException {
     List<IndexCommitWrapper> wrapperList = wrap(list);
     deletionPolicy.onCommit(wrapperList);
     updateCommitPoints(wrapperList);
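The signature changes track Lucene's IndexDeletionPolicy, which hands the policy a list of its own IndexCommit subclass; a List<? extends IndexCommit> parameter accepts that list directly, where List<IndexCommit> would not and previously forced a raw List plus an unchecked cast. A reduced illustration with stand-in types:

import java.util.ArrayList;
import java.util.List;

class Commit {}
class CommitPoint extends Commit {}

class WildcardDemo {
  static void onCommit(List<? extends Commit> commits) {
    for (Commit c : commits) { /* read-only iteration is all the policy needs */ }
  }

  public static void main(String[] args) {
    List<CommitPoint> points = new ArrayList<CommitPoint>();
    points.add(new CommitPoint());
    onCommit(points); // compiles; a List<Commit> parameter would reject this
  }
}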
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index cac711f..413deaf 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -18,8 +18,10 @@
 package org.apache.solr.core;
 
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.Writer;
 import java.lang.reflect.Constructor;
 import java.net.URL;
@@ -62,6 +64,7 @@
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.solr.cloud.CloudDescriptor;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.CommonParams.EchoParamStyle;
@@ -670,11 +673,10 @@
     if (dataDir == null) {
       if (cd.usingDefaultDataDir()) dataDir = config.getDataDir();
       if (dataDir == null) {
-        dataDir = cd.getDataDir();
         try {
+          dataDir = cd.getDataDir();
           if (!directoryFactory.isAbsolute(dataDir)) {
-            dataDir = directoryFactory.normalize(SolrResourceLoader
-                .normalizeDir(cd.getInstanceDir()) + dataDir);
+            dataDir = directoryFactory.getDataHome(cd);
           }
         } catch (IOException e) {
           throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
@@ -745,7 +747,6 @@
       this.codec = initCodec(solrConfig, schema);
       
       if (updateHandler == null) {
-        initDirectoryFactory();
         solrCoreState = new DefaultSolrCoreState(getDirectoryFactory());
       } else {
         solrCoreState = updateHandler.getSolrCoreState();
@@ -848,15 +849,63 @@
     
     CoreContainer cc = cd.getCoreContainer();
     
+    if (cc != null) {
+      if (cc.cfg != null && cc.cfg instanceof ConfigSolrXml) {
+        writePropFile(cd, cc);
+      }
+    }
+
     if (cc != null && cc.isZooKeeperAware() && Slice.CONSTRUCTION.equals(cd.getCloudDescriptor().getShardState())) {
       // set update log to buffer before publishing the core
       getUpdateHandler().getUpdateLog().bufferUpdates();
+      
+      cd.getCloudDescriptor().setShardState(null);
+      cd.getCloudDescriptor().setShardRange(null);
+      
     }
     // For debugging   
 //    numOpens.incrementAndGet();
 //    openHandles.put(this, new RuntimeException("unclosed core - name:" + getName() + " refs: " + refCount.get()));
   }
 
+  private void writePropFile(CoreDescriptor cd, CoreContainer cc) {
+    File propFile = new File(cd.getInstanceDir(), "core.properties");
+    if (!propFile.exists()) {
+      propFile.getParentFile().mkdirs();
+      Properties props = new Properties();
+      props.put("name", cd.getName());
+      if (cc.isZooKeeperAware()) {
+        String collection = cd.getCloudDescriptor().getCollectionName();
+        if (collection != null) {
+          props.put("collection", collection);
+        }
+        String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
+        if (coreNodeName != null) {
+          props.put("coreNodeName", coreNodeName);
+        }
+        String roles = cd.getCloudDescriptor().getRoles();
+        if (roles != null) {
+          props.put("roles", roles);
+        }
+        String shardId = cd.getCloudDescriptor().getShardId();
+        if (shardId != null) {
+          props.put("shard", shardId);
+        }
+      }
+      OutputStream out = null;
+      try {
+        out = new FileOutputStream(propFile);
+        props.store(out, "");
+      } catch (IOException e) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
+      } finally {
+        if (out != null) {
+          IOUtils.closeQuietly(out);
+        }
+      }
+    }
+  }
+    
   private Codec initCodec(SolrConfig solrConfig, final IndexSchema schema) {
     final PluginInfo info = solrConfig.getPluginInfo(CodecFactory.class.getName());
     final CodecFactory factory;
@@ -1352,6 +1401,10 @@
    * This method acquires openSearcherLock - do not call with searcherLock held!
    */
   public RefCounted<SolrIndexSearcher>  openNewSearcher(boolean updateHandlerReopens, boolean realtime) {
+    if (isClosed()) { // catch some errors quicker
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "openNewSearcher called on closed core");
+    }
+
     SolrIndexSearcher tmp;
     RefCounted<SolrIndexSearcher> newestSearcher = null;
     boolean nrt = solrConfig.reopenReaders && updateHandlerReopens;
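writePropFile above writes a core.properties discovery file on first startup when the container uses new-style solr.xml. A minimal sketch of the same Properties/store pattern with hypothetical values (note that Properties.store also prepends a timestamp comment line):

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Properties;

public class CorePropsSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.put("name", "collection1_shard1_replica1"); // hypothetical values
    props.put("collection", "collection1");
    props.put("shard", "shard1");
    OutputStream out = new FileOutputStream("core.properties");
    try {
      props.store(out, "");
    } finally {
      out.close(); // the real code uses IOUtils.closeQuietly
    }
  }
}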
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCores.java b/solr/core/src/java/org/apache/solr/core/SolrCores.java
index 2d1acf4..5d39796 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCores.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCores.java
@@ -17,8 +17,18 @@
  * limitations under the License.
  */
 
+import org.apache.commons.lang.StringUtils;
+import org.apache.solr.cloud.CloudDescriptor;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.core.SolrXMLSerializer.SolrCoreXMLDef;
+import org.apache.solr.util.DOMUtil;
+import org.w3c.dom.Node;
+
+import javax.xml.xpath.XPathExpressionException;
 import java.io.File;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -30,15 +40,6 @@
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 
-import javax.xml.xpath.XPathExpressionException;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.solr.cloud.CloudDescriptor;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.core.SolrXMLSerializer.SolrCoreXMLDef;
-import org.apache.solr.util.DOMUtil;
-import org.w3c.dom.Node;
-
 
 class SolrCores {
   private static SolrXMLSerializer SOLR_XML_SERIALIZER = new SolrXMLSerializer();
@@ -99,70 +100,34 @@
   // We are shutting down. You can't hold the lock on the various lists of cores while they shut down, so we need to
   // make a temporary copy of the names and shut them down outside the lock.
   protected void close() {
-    List<String> coreNames;
-    List<String> transientNames;
-    List<SolrCore> pendingToClose;
+    Collection<SolrCore> coreList = new ArrayList<SolrCore>();
 
     // It might be possible for one of the cores to move from one list to another while we're closing them. So
     // loop through the lists until they're all empty. In particular, the core could have moved from the transient
     // list to the pendingCloses list.
 
-    while (true) {
+    do {
+      coreList.clear();
       synchronized (modifyLock) {
-        coreNames = new ArrayList<String>(cores.keySet());
-        transientNames = new ArrayList<String>(transientCores.keySet());
-        pendingToClose = new ArrayList<SolrCore>(pendingCloses);
+        // make a copy of the cores then clear the map so the core isn't handed out to a request again
+        coreList.addAll(cores.values());
+        cores.clear();
+
+        coreList.addAll(transientCores.values());
+        transientCores.clear();
+
+        coreList.addAll(pendingCloses);
+        pendingCloses.clear();
       }
 
-      if (coreNames.size() == 0 && transientNames.size() == 0 && pendingToClose.size() == 0) break;
-
-      for (String coreName : coreNames) {
-        SolrCore core = cores.get(coreName);
-        if (core == null) {
-          CoreContainer.log.info("Core " + coreName + " moved from core container list before closing.");
-        } else {
-          try {
-            core.close();
-          } catch (Throwable t) {
-            SolrException.log(CoreContainer.log, "Error shutting down core", t);
-          } finally {
-            synchronized (modifyLock) {
-              cores.remove(coreName);
-            }
-          }
-        }
-      }
-
-      for (String coreName : transientNames) {
-        SolrCore core = transientCores.get(coreName);
-        if (core == null) {
-          CoreContainer.log.info("Core " + coreName + " moved from transient core container list before closing.");
-        } else {
-          try {
-            core.close();
-          } catch (Throwable t) {
-            SolrException.log(CoreContainer.log, "Error shutting down core", t);
-          } finally {
-            synchronized (modifyLock) {
-              transientCores.remove(coreName);
-            }
-          }
-        }
-      }
-
-      // We might have some cores that we were _thinking_ about shutting down, so take care of those too.
-      for (SolrCore core : pendingToClose) {
+      for (SolrCore core : coreList) {
         try {
           core.close();
         } catch (Throwable t) {
           SolrException.log(CoreContainer.log, "Error shutting down core", t);
-        } finally {
-          synchronized (modifyLock) {
-            pendingCloses.remove(core);
-          }
         }
       }
-    }
+    } while (coreList.size() > 0);
   }
 
   //WARNING! This should be the _only_ place you put anything into the list of transient cores!
@@ -246,10 +211,18 @@
     synchronized (modifyLock) {
       SolrCore c0 = cores.get(n0);
       SolrCore c1 = cores.get(n1);
-      if (c0 == null)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n0);
-      if (c1 == null)
-        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n1);
+      if (c0 == null) { // Might be an unloaded transient core
+        c0 = container.getCore(n0);
+        if (c0 == null) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n0);
+        }
+      }
+      if (c1 == null) { // Might be an unloaded transient core
+        c1 = container.getCore(n1);
+        if (c1 == null) {
+          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + n1);
+        }
+      }
       cores.put(n0, c1);
       cores.put(n1, c0);
 
@@ -299,20 +272,20 @@
     }
   }
 
-  protected SolrCore getCoreFromAnyList(String name) {
-    SolrCore core;
-
+  /* If you don't increment the reference count, someone could close the core before you use it. */
+  protected SolrCore getCoreFromAnyList(String name, boolean incRefCount) {
     synchronized (modifyLock) {
-      core = cores.get(name);
-      if (core != null) {
-        return core;
+      SolrCore core = cores.get(name);
+
+      if (core == null) {
+        core = transientCores.get(name);
       }
 
-      if (dynamicDescriptors.size() == 0) {
-        return null; // Nobody even tried to define any transient cores, so we're done.
+      if (core != null && incRefCount) {
+        core.open();
       }
-      // Now look for already loaded transient cores.
-      return transientCores.get(name);
+
+      return core;
     }
   }
 
@@ -351,15 +324,17 @@
       return coreToOrigName.get(solrCore);
     }
   }
-  
+
   public void persistCores(Config cfg, Properties containerProperties,
       Map<String,String> rootSolrAttribs, Map<String,String> coresAttribs,
-      File file, File configFile, SolrResourceLoader loader) throws XPathExpressionException {
+      Map<String, String> loggingAttribs, Map<String,String> watcherAttribs,
+      Map<String, String> shardHandlerAttrib, Map<String,String> shardHandlerProps,
+      File file, SolrResourceLoader loader) throws XPathExpressionException {
 
-    
+
     List<SolrXMLSerializer.SolrCoreXMLDef> solrCoreXMLDefs = new ArrayList<SolrXMLSerializer.SolrCoreXMLDef>();
     synchronized (modifyLock) {
-      
+
       persistCores(cfg, cores, loader, solrCoreXMLDefs);
       persistCores(cfg, transientCores, loader, solrCoreXMLDefs);
       // add back all the cores that aren't loaded, either in cores or transient
@@ -384,9 +359,13 @@
       solrXMLDef.containerProperties = containerProperties;
       solrXMLDef.solrAttribs = rootSolrAttribs;
       solrXMLDef.coresAttribs = coresAttribs;
+      solrXMLDef.loggingAttribs = loggingAttribs;
+      solrXMLDef.watcherAttribs = watcherAttribs;
+      solrXMLDef.shardHandlerAttribs = shardHandlerAttrib;
+      solrXMLDef.shardHandlerProps = shardHandlerProps;
       SOLR_XML_SERIALIZER.persistFile(file, solrXMLDef);
     }
-    
+
   }
   // Wait here until any pending operations (load, unload or reload) are completed on this core.
   protected SolrCore waitAddPendingCoreOps(String name) {
@@ -419,7 +398,7 @@
         if (! pendingCoreOps.add(name)) {
           CoreContainer.log.warn("Replaced an entry in pendingCoreOps {}, we should not be doing this", name);
         }
-        return getCoreFromAnyList(name); // we might have been _unloading_ the core, so return the core if it was loaded.
+        return getCoreFromAnyList(name, false); // we might have been _unloading_ the core, so return the core if it was loaded.
       }
     }
     return null;
@@ -442,95 +421,162 @@
       addCoreToPersistList(cfg, loader, solrCore.getCoreDescriptor(), getCoreToOrigName(solrCore), solrCoreXMLDefs);
     }
   }
-  
-  private void addCoreProperty(Map<String,String> coreAttribs, SolrResourceLoader loader, Node node, String name,
+
+  private void addCoreProperty(Map<String,String> propMap, SolrResourceLoader loader, Node node, String name,
+                               String value) {
+    addCoreProperty(propMap, loader, node, name, value, null);
+  }
+
+  private void addCoreProperty(Map<String,String> propMap, SolrResourceLoader loader, Node node, String name,
       String value, String defaultValue) {
-    
+
     if (node == null) {
-      coreAttribs.put(name, value);
+      propMap.put(name, value);
       return;
     }
-    
+
     if (node != null) {
       String rawAttribValue = DOMUtil.getAttr(node, name, null);
 
+      if (rawAttribValue == null) {
+        return; // It was never in the original definition.
+      }
+
       if (value == null) {
-        coreAttribs.put(name, rawAttribValue);
+        propMap.put(name, rawAttribValue);
         return;
       }
-      if (rawAttribValue == null && defaultValue != null && value.equals(defaultValue)) {
+
+      // Guard against spurious leading/trailing slash differences when comparing
+      // attribute values. An admitted hack (EOE), but it fixes real problems, and
+      // solr.xml persistence is going away anyway (though maybe not core.properties).
+      String defComp = regularizeAttr(defaultValue);
+
+      if (defComp != null && regularizeAttr(value).equals(defComp)) {
         return;
       }
-      if (rawAttribValue != null && value.equals(DOMUtil.substituteProperty(rawAttribValue, loader.getCoreProperties()))){
-        coreAttribs.put(name, rawAttribValue);
+      String rawComp = regularizeAttr(rawAttribValue);
+      if (rawComp != null && regularizeAttr(value).equals(
+          regularizeAttr(DOMUtil.substituteProperty(rawAttribValue, loader.getCoreProperties())))) {
+        propMap.put(name, rawAttribValue);
       } else {
-        coreAttribs.put(name, value);
+        propMap.put(name, value);
       }
     }
-
   }
 
+  protected String regularizeAttr(String path) {
+    if (path == null)
+      return null;
+    path = path.replace('/', File.separatorChar);
+    path = path.replace('\\', File.separatorChar);
+    if (path.endsWith(File.separator)) {
+      path = path.substring(0, path.length() - 1);
+    }
+    return path;
+  }
   protected void addCoreToPersistList(Config cfg, SolrResourceLoader loader,
       CoreDescriptor dcore, String origCoreName,
       List<SolrCoreXMLDef> solrCoreXMLDefs) throws XPathExpressionException {
-    
-    String coreName = dcore.getProperty(CoreDescriptor.CORE_NAME);
-    
+
     Map<String,String> coreAttribs = new HashMap<String,String>();
+    Properties newProps = new Properties();
 
-    CloudDescriptor cd = dcore.getCloudDescriptor();
-    String collection = null;
-    if (cd != null) collection = cd.getCollectionName();
+    // This is simple: just take anything that was sent in and saved away at core creation, and write it out.
+    if (dcore.getCreatedProperties().size() > 0) {
+      final List<String> stdNames = new ArrayList<String>(Arrays.asList(CoreDescriptor.standardPropNames));
+      coreAttribs.put(CoreDescriptor.CORE_NAME, dcore.getName()); // NOTE: may have been swapped or renamed!
+      for (String key : dcore.getCreatedProperties().stringPropertyNames()) {
+        if (! stdNames.contains(key) && ! key.startsWith(CoreAdminParams.PROPERTY_PREFIX)) continue;
+        if (key.indexOf(CoreAdminParams.PROPERTY_PREFIX) == 0) {
+          newProps.put(key.substring(CoreAdminParams.PROPERTY_PREFIX.length()), dcore.getCreatedProperties().getProperty(key));
+        } else if (! CoreDescriptor.CORE_NAME.equals(key)) {
+          coreAttribs.put(key, dcore.getCreatedProperties().getProperty(key));
+        }
+      }
+      // Ensure instanceDir is persisted even when it's the default, since it's
+      // checked at startup whether or not it was specified on the create command.
+      if (! dcore.getCreatedProperties().containsKey(CoreDescriptor.CORE_INSTDIR)) {
+        coreAttribs.put(CoreDescriptor.CORE_INSTDIR, dcore.getRawInstanceDir());
+      }
+    } else {
 
-    if (origCoreName == null) {
-      origCoreName = coreName;
+      String coreName = dcore.getProperty(CoreDescriptor.CORE_NAME);
+
+      CloudDescriptor cd = dcore.getCloudDescriptor();
+      String collection = null;
+      if (cd != null) collection = cd.getCollectionName();
+
+      if (origCoreName == null) {
+        origCoreName = coreName;
+      }
+
+      Node node = null;
+      if (cfg != null) {
+        node = cfg.getNode("/solr/cores/core[@name='" + origCoreName + "']",
+            false);
+      }
+
+      coreAttribs.put(CoreDescriptor.CORE_NAME, coreName);
+      //coreAttribs.put(CoreDescriptor.CORE_INSTDIR, dcore.getRawInstanceDir());
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_INSTDIR, dcore.getRawInstanceDir(), null);
+
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_COLLECTION,
+          StringUtils.isNotBlank(collection) ? collection : dcore.getName());
+
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_DATADIR,
+          dcore.getDataDir());
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_ULOGDIR,
+          dcore.getUlogDir());
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_TRANSIENT,
+          Boolean.toString(dcore.isTransient()));
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_LOADONSTARTUP,
+          Boolean.toString(dcore.isLoadOnStartup()));
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_CONFIG,
+          dcore.getConfigName());
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_SCHEMA,
+          dcore.getSchemaName());
+
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_COLLECTION,
+          collection, dcore.getName());
+
+      String shard = null;
+      String roles = null;
+      String node_name = null;
+      if (cd != null) {
+        shard = cd.getShardId();
+        roles = cd.getRoles();
+        node_name = cd.getCoreNodeName();
+      }
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_SHARD,
+          shard);
+
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_ROLES,
+          roles);
+
+      addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_NODE_NAME,
+          node_name);
+
+
+      for (Object key : dcore.getCoreProperties().keySet()) {
+
+        if (cfg != null) {
+          Node propNode = cfg.getNode("/solr/cores/core[@name='" + origCoreName + "']/property[@name='" + key + "']",
+              false);
+
+          if (propNode != null) { // This means it was in the original DOM element, so just copy it.
+            newProps.put(DOMUtil.getAttr(propNode, "name", null), DOMUtil.getAttr(propNode, "value", null));
+          }
+        }
+      }
     }
-    
-    Properties properties = dcore.getCoreProperties();
-    Node node = null;
-    if (cfg != null) {
-      node = cfg.getNode("/solr/cores/core[@name='" + origCoreName + "']",
-          false);
-    }
-    
-    coreAttribs.put(CoreDescriptor.CORE_NAME, coreName);
-    
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_INSTDIR, dcore.getRawInstanceDir(), null);
 
-    coreAttribs.put(CoreDescriptor.CORE_COLLECTION,
-        StringUtils.isNotBlank(collection) ? collection : dcore.getName());
-    
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_DATADIR, dcore.getDataDir(), null);
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_ULOGDIR, dcore.getUlogDir(), null);
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_TRANSIENT, Boolean.toString(dcore.isTransient()), null);
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_LOADONSTARTUP, Boolean.toString(dcore.isLoadOnStartup()), null);
-    
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_COLLECTION,
-        collection, dcore.getName());
-    
-    String shard = null;
-    String roles = null;
-    if (cd != null) {
-      shard = cd.getShardId();
-      roles = cd.getRoles();
-    }
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_SHARD,
-        shard, null);
-    
-    addCoreProperty(coreAttribs, loader, node, CoreDescriptor.CORE_ROLES,
-        roles, null);
 
-    coreAttribs.put(CoreDescriptor.CORE_LOADONSTARTUP,
-        Boolean.toString(dcore.isLoadOnStartup()));
-    coreAttribs.put(CoreDescriptor.CORE_TRANSIENT,
-        Boolean.toString(dcore.isTransient()));
-    
 
     SolrXMLSerializer.SolrCoreXMLDef solrCoreXMLDef = new SolrXMLSerializer.SolrCoreXMLDef();
     solrCoreXMLDef.coreAttribs = coreAttribs;
-    solrCoreXMLDef.coreProperties = properties;
+    solrCoreXMLDef.coreProperties = newProps;
     solrCoreXMLDefs.add(solrCoreXMLDef);
-
   }
 
   protected Object getModifyLock() {
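The close() rewrite above replaces per-list bookkeeping with a drain loop: snapshot and clear every list under the lock, close the snapshot outside the lock, and repeat until a pass drains nothing, which covers cores that migrate between lists mid-shutdown. The pattern, reduced to a standalone sketch:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class DrainSketch {
  private final Object modifyLock = new Object();
  private final Map<String, AutoCloseable> cores = new HashMap<String, AutoCloseable>();

  void closeAll() {
    List<AutoCloseable> batch = new ArrayList<AutoCloseable>();
    do {
      batch.clear();
      synchronized (modifyLock) {
        batch.addAll(cores.values()); // snapshot...
        cores.clear();                // ...and stop handing cores out
      }
      for (AutoCloseable c : batch) {
        try {
          c.close(); // never close while holding the lock
        } catch (Exception e) {
          // log and continue, as SolrException.log does in the real code
        }
      }
    } while (!batch.isEmpty());
  }
}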
diff --git a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java b/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
index 3702d4b..20b51f5 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
@@ -77,7 +77,7 @@
    * Internal use for Lucene... do not explicitly call.
    */
   @Override
-  public void onInit(List commits) throws IOException {
+  public void onInit(List<? extends IndexCommit> commits) throws IOException {
     // SOLR-4547: log basic data at INFO, add filenames at DEBUG.
     if (commits.isEmpty()) {
       return;
@@ -86,26 +86,26 @@
         new CommitsLoggingInfo(commits));
     log.debug("SolrDeletionPolicy.onInit: commits: {}",
         new CommitsLoggingDebug(commits));
-    updateCommits((List<IndexCommit>) commits);
+    updateCommits(commits);
   }
 
   /**
    * Internal use for Lucene... do not explicitly call.
    */
   @Override
-  public void onCommit(List commits) throws IOException {
+  public void onCommit(List<? extends IndexCommit> commits) throws IOException {
     // SOLR-4547: log basic data at INFO, add filenames at DEBUG.
     log.info("SolrDeletionPolicy.onCommit: commits: {}",
         new CommitsLoggingInfo(commits));
     log.debug("SolrDeletionPolicy.onCommit: commits: {}",
         new CommitsLoggingDebug(commits));
-    updateCommits((List<IndexCommit>) commits);
+    updateCommits(commits);
   }
 
   private static class CommitsLoggingInfo {
-    private List<IndexCommit> commits;
+    private List<? extends IndexCommit> commits;
 
-    public CommitsLoggingInfo(List<IndexCommit> commits) {
+    public CommitsLoggingInfo(List<? extends IndexCommit> commits) {
       this.commits = commits;
     }
 
@@ -135,7 +135,7 @@
   }
 
   private static class CommitsLoggingDebug extends CommitsLoggingInfo {
-    public CommitsLoggingDebug(List<IndexCommit> commits) {
+    public CommitsLoggingDebug(List<? extends IndexCommit> commits) {
       super(commits);
     }
 
@@ -150,7 +150,7 @@
     }
   }
 
-  private void updateCommits(List<IndexCommit> commits) {
+  private void updateCommits(List<? extends IndexCommit> commits) {
     // to be safe, we should only call delete on a commit point passed to us
     // in this specific call (may be across diff IndexWriter instances).
     // this will happen rarely, so just synchronize everything
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
index a6d9b2b..a3dd5ac 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
@@ -17,56 +17,58 @@
 
 package org.apache.solr.core;
 
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.ResourceLoaderAware;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.analysis.util.WordlistLoader;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.util.IOUtils;
+import org.apache.solr.common.ResourceLoader;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.handler.admin.CoreAdminHandler;
+import org.apache.solr.handler.component.SearchComponent;
+import org.apache.solr.handler.component.ShardHandlerFactory;
+import org.apache.solr.request.SolrRequestHandler;
+import org.apache.solr.response.QueryResponseWriter;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.ManagedIndexSchemaFactory;
+import org.apache.solr.schema.SimilarityFactory;
+import org.apache.solr.search.QParserPlugin;
+import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
+import org.apache.solr.util.FileUtils;
+import org.apache.solr.util.plugin.SolrCoreAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import javax.naming.NoInitialContextException;
 import java.io.Closeable;
 import java.io.File;
 import java.io.FileFilter;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.reflect.Constructor;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLClassLoader;
-import java.util.*;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.ResourceLoaderAware;
-import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.analysis.util.WordlistLoader;
-import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.handler.admin.CoreAdminHandler;
-import org.apache.solr.handler.component.ShardHandlerFactory;
-import org.apache.solr.schema.ManagedIndexSchemaFactory;
-import org.apache.solr.schema.SimilarityFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
-import java.lang.reflect.Constructor;
-
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import javax.naming.NoInitialContextException;
-
-import org.apache.solr.util.FileUtils;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.handler.component.SearchComponent;
-import org.apache.solr.request.SolrRequestHandler;
-import org.apache.solr.response.QueryResponseWriter;
-import org.apache.solr.schema.FieldType;
-import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
-import org.apache.solr.util.plugin.SolrCoreAware;
-import org.apache.solr.search.QParserPlugin;
-
 /**
  * @since solr 1.3
  */ 
diff --git a/solr/core/src/java/org/apache/solr/core/SolrXMLSerializer.java b/solr/core/src/java/org/apache/solr/core/SolrXMLSerializer.java
index a413997..43760e0 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrXMLSerializer.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrXMLSerializer.java
@@ -17,6 +17,11 @@
  * limitations under the License.
  */
 
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.util.XML;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileInputStream;
@@ -30,11 +35,6 @@
 import java.util.Properties;
 import java.util.Set;
 
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.XML;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 public class SolrXMLSerializer {
   protected static Logger log = LoggerFactory
       .getLogger(SolrXMLSerializer.class);
@@ -62,6 +62,41 @@
     if (containerProperties != null && !containerProperties.isEmpty()) {
       writeProperties(w, containerProperties, "  ");
     }
+
+    // Output logging section if any
+    if (solrXMLDef.loggingAttribs.size() > 0 || solrXMLDef.watcherAttribs.size() > 0) {
+      w.write(INDENT + "<logging");
+      for (Map.Entry<String, String> ent : solrXMLDef.loggingAttribs.entrySet()) {
+        writeAttribute(w, ent.getKey(), ent.getValue());
+      }
+      w.write(">\n");
+
+      if (solrXMLDef.watcherAttribs.size() > 0) {
+        w.write(INDENT + INDENT + "<watcher");
+        for (Map.Entry<String, String> ent : solrXMLDef.watcherAttribs.entrySet()) {
+          writeAttribute(w, ent.getKey(), ent.getValue());
+        }
+        w.write("/>\n");
+      }
+      w.write(INDENT + "</logging>\n");
+    }
+
+    // Output shard handler section if any
+    if (solrXMLDef.shardHandlerAttribs.size() > 0 || solrXMLDef.shardHandlerProps.size() > 0) {
+      w.write(INDENT + "<shardHandlerFactory");
+      for (Map.Entry<String, String> ent : solrXMLDef.shardHandlerAttribs.entrySet()) {
+        writeAttribute(w, ent.getKey(), ent.getValue());
+      }
+      w.write(">\n");
+      if (solrXMLDef.shardHandlerProps.size() > 0) {
+        for (Map.Entry<String, String> ent : solrXMLDef.shardHandlerProps.entrySet()) {
+          w.write(INDENT + INDENT + "<int name=\"" + ent.getKey() + "\">" + ent.getValue() + "</int>\n");
+        }
+      }
+      w.write(INDENT + "</shardHandlerFactory>\n");
+    }
+
+
     w.write(INDENT + "<cores");
     Map<String,String> coresAttribs = solrXMLDef.coresAttribs;
     Set<String> coreAttribKeys = coresAttribs.keySet();
@@ -116,7 +151,7 @@
   }
   
   void persistFile(File file, SolrXMLDef solrXMLDef) {
-    log.info("Persisting cores config to " + file);
+    log.info("Persisting cores config to " + file.getAbsolutePath());
     
     File tmpFile = null;
     try {
@@ -198,6 +233,10 @@
     Properties containerProperties;
     Map<String,String> solrAttribs;
     Map<String,String> coresAttribs;
+    Map<String, String> loggingAttribs;
+    Map<String, String> watcherAttribs;
+    Map<String, String> shardHandlerAttribs;
+    Map<String, String> shardHandlerProps;
     List<SolrCoreXMLDef> coresDefs;
   }
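With the four new attribute maps wired through, persistFile can now round-trip the logging and shardHandlerFactory sections of solr.xml instead of silently dropping them. The emitted shape looks roughly like this (attribute values here are hypothetical):

  <logging enabled="true">
    <watcher size="1024" threshold="INFO"/>
  </logging>
  <shardHandlerFactory name="shardHandlerFactory" class="HttpShardHandlerFactory">
    <int name="connTimeout">15000</int>
    <int name="socketTimeout">120000</int>
  </shardHandlerFactory>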
   
diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
index c9ed9b3..538f9dd 100644
--- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
@@ -50,6 +50,7 @@
   private String hostContext;
   private String host;
   private String leaderVoteWait;
+  private boolean genericCoreNodeNames;
   private int distribUpdateConnTimeout;
 
   public SolrZkServer getZkServer() {
@@ -75,6 +76,10 @@
   public String getLeaderVoteWait() {
     return leaderVoteWait;
   }
+  
+  public boolean getGenericCoreNodeNames() {
+    return genericCoreNodeNames;
+  }
 
   public int getDistribUpdateConnTimeout() {
     return distribUpdateConnTimeout;
@@ -90,7 +95,7 @@
     
   }
   
-  public void initZooKeeper(final CoreContainer cc, String solrHome, String zkHost, int zkClientTimeout, String hostPort, String hostContext, String host, String leaderVoteWait, int distribUpdateConnTimeout, int distribUpdateSoTimeout) {
+  public void initZooKeeper(final CoreContainer cc, String solrHome, String zkHost, int zkClientTimeout, String hostPort, String hostContext, String host, String leaderVoteWait, boolean genericCoreNodeNames, int distribUpdateConnTimeout, int distribUpdateSoTimeout) {
     ZkController zkController = null;
     
     // if zkHost sys property is not set, we are not using ZooKeeper
@@ -108,6 +113,7 @@
     this.hostContext = hostContext;
     this.host = host;
     this.leaderVoteWait = leaderVoteWait;
+    this.genericCoreNodeNames = genericCoreNodeNames;
     this.distribUpdateConnTimeout = distribUpdateConnTimeout;
     this.distribUpdateSoTimeout = distribUpdateSoTimeout;
     
@@ -163,7 +169,7 @@
         }
         zkController = new ZkController(cc, zookeeperHost, zkClientTimeout,
             zkClientConnectTimeout, host, hostPort, hostContext,
-            leaderVoteWait, distribUpdateConnTimeout, distribUpdateSoTimeout,
+            leaderVoteWait, genericCoreNodeNames, distribUpdateConnTimeout, distribUpdateSoTimeout,
             new CurrentCoreDescriptorProvider() {
 
               @Override
diff --git a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
index c1602a0..6e8c53a 100644
--- a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
@@ -35,6 +35,7 @@
 import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.SchemaField;
+import org.apache.solr.util.EmptyEntityResolver;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,6 +91,8 @@
     super.init(args);
 
     inputFactory = XMLInputFactory.newInstance();
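+    // protect against XML external entities and route parser warnings to xmllog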
+    EmptyEntityResolver.configureXMLInputFactory(inputFactory);
+    inputFactory.setXMLReporter(xmllog);
     try {
       // The java 1.6 bundled stax parser (sjsxp) does not currently have a thread-safe
       // XMLInputFactory, as that implementation tries to cache and reuse the
@@ -103,7 +106,6 @@
       // is implementation specific.
       log.debug("Unable to set the 'reuse-instance' property for the input factory: " + inputFactory);
     }
-    inputFactory.setXMLReporter(xmllog);
   }
 
   /**
diff --git a/solr/core/src/java/org/apache/solr/handler/SnapPuller.java b/solr/core/src/java/org/apache/solr/handler/SnapPuller.java
index 2717473..2a31d2b 100644
--- a/solr/core/src/java/org/apache/solr/handler/SnapPuller.java
+++ b/solr/core/src/java/org/apache/solr/handler/SnapPuller.java
@@ -318,20 +318,25 @@
       long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
       long latestGeneration = (Long) response.get(GENERATION);
 
-      IndexCommit commit;
-      RefCounted<SolrIndexSearcher> searcherRefCounted = null;
-      try {
-        searcherRefCounted = core.getNewestSearcher(false);
-        if (searcherRefCounted == null) {
-          SolrException.log(LOG, "No open searcher found - fetch aborted");
-          return false;
+      // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
+      IndexCommit commit = core.getDeletionPolicy().getLatestCommit();
+      if (commit == null) {
+        // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
+        RefCounted<SolrIndexSearcher> searcherRefCounted = null;
+        try {
+          searcherRefCounted = core.getNewestSearcher(false);
+          if (searcherRefCounted == null) {
+            LOG.warn("No open searcher found - fetch aborted");
+            return false;
+          }
+          commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
+        } finally {
+          if (searcherRefCounted != null)
+            searcherRefCounted.decref();
         }
-        commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
-      } finally {
-        if (searcherRefCounted != null)
-          searcherRefCounted.decref();
       }
-      
+
       if (latestVersion == 0L) {
         if (forceReplication && commit.getGeneration() != 0) {
           // since we won't get the files for an empty index,
@@ -403,7 +408,14 @@
             successfulInstall = modifyIndexProps(tmpIdxDirName);
             deleteTmpIdxDir  =  false;
           } else {
-            successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
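+            // close the IndexWriter so the index files can be safely moved, then reopen it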
+            solrCore.getUpdateHandler().getSolrCoreState()
+                .closeIndexWriter(core, true);
+            try {
+              successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
+            } finally {
+              solrCore.getUpdateHandler().getSolrCoreState()
+                  .openIndexWriter(core);
+            }
           }
           if (successfulInstall) {
             if (isFullCopyNeeded) {
@@ -426,7 +438,12 @@
             successfulInstall = modifyIndexProps(tmpIdxDirName);
             deleteTmpIdxDir =  false;
           } else {
-            successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
+            solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(core, true);
+            try {
+              successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
+            } finally {
+              solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(core);
+            }
           }
           if (successfulInstall) {
             logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
@@ -443,7 +460,11 @@
               core.getDirectoryFactory().remove(indexDir);
             }
           }
-          openNewWriterAndSearcher(isFullCopyNeeded);
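+          // a writer rebooted on the new index is only needed after a full copy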
+          if (isFullCopyNeeded) {
+            solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
+          }
+          
+          openNewSearcherAndUpdateCommitPoint(isFullCopyNeeded);
         }
         
         replicationStartTime = 0;
@@ -615,11 +636,9 @@
     return sb;
   }
 
-  private void openNewWriterAndSearcher(boolean isFullCopyNeeded) throws IOException {
+  private void openNewSearcherAndUpdateCommitPoint(boolean isFullCopyNeeded) throws IOException {
     SolrQueryRequest req = new LocalSolrQueryRequest(solrCore,
         new ModifiableSolrParams());
-    // reboot the writer on the new index and get a new searcher
-    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
     
     RefCounted<SolrIndexSearcher> searcher = null;
     IndexCommit commitPoint;
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 97dfa10..3010d48 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@ -17,19 +17,6 @@
 
 package org.apache.solr.handler.admin;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.Future;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.lucene.index.DirectoryReader;
@@ -57,8 +44,8 @@
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.DirectoryFactory;
-import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.DirectoryFactory.DirContext;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
@@ -75,6 +62,19 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Future;
+
 /**
  *
  * @since solr 1.3
@@ -498,14 +498,18 @@
       Iterator<String> parameterNamesIterator = params.getParameterNamesIterator();
       while (parameterNamesIterator.hasNext()) {
           String parameterName = parameterNamesIterator.next();
+          String parameterValue = params.get(parameterName);
+          dcore.addCreatedProperty(parameterName, parameterValue); // record create parameters so they can be persisted back to solr.xml
+
           if(parameterName.startsWith(CoreAdminParams.PROPERTY_PREFIX)) {
-              String parameterValue = params.get(parameterName);
-              String propertyName = parameterName.substring(CoreAdminParams.PROPERTY_PREFIX.length()); // skip prefix
-              coreProperties.put(propertyName, parameterValue);
+            String propertyName = parameterName.substring(CoreAdminParams.PROPERTY_PREFIX.length()); // skip prefix
+            coreProperties.put(propertyName, parameterValue);
           }
       }
       dcore.setCoreProperties(coreProperties);
-      
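+      // in SolrCloud mode, publish the core to ZooKeeper before it is created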
+      if (coreContainer.getZkController() != null) {
+        coreContainer.preRegisterInZk(dcore);
+      }
       SolrCore core = coreContainer.create(dcore);
 
       coreContainer.register(name, core, false);
@@ -522,9 +526,24 @@
           SolrException.log(log, null, e);
         }
       }
+      
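+      // walk the cause chain so the root cause appears in the error message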
+      Throwable tc = ex;
+      Throwable c = null;
+      do {
+        tc = tc.getCause();
+        if (tc != null) {
+          c = tc;
+        }
+      } while (tc != null);
+      
+      String rootMsg = "";
+      if (c != null) {
+        rootMsg = " Caused by: " + c.getMessage();
+      }
+      
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                               "Error CREATEing SolrCore '" + name + "': " +
-                              ex.getMessage(), ex);
+                              ex.getMessage() + rootMsg, ex);
     }
   }
 
@@ -713,7 +732,9 @@
     boolean doPersist = false;
     String fileName = params.get(CoreAdminParams.FILE);
     if (fileName != null) {
-      File file = new File(coreContainer.getConfigFile().getParentFile(), fileName);
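+      // absolute file names are used as-is; relative ones resolve against solr.xml's directory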
+      File file = new File(fileName);
+      if (!file.isAbsolute())
+        file = new File(coreContainer.getConfigFile().getParentFile(), fileName);
       coreContainer.persistFile(file);
       rsp.add("saved", file.getAbsolutePath());
       doPersist = false;
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
index ef36bcf..206fc09 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java
@@ -362,20 +362,18 @@
             Map<String, Replica> sliceShards = slice.getReplicasMap();
 
             // For now, recreate the | delimited list of equivalent servers
-            Set<String> liveNodes = clusterState.getLiveNodes();
             StringBuilder sliceShardsStr = new StringBuilder();
             boolean first = true;
-            for (ZkNodeProps nodeProps : sliceShards.values()) {
-              ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
-              if (!liveNodes.contains(coreNodeProps.getNodeName())
-                  || !coreNodeProps.getState().equals(
+            for (Replica replica : sliceShards.values()) {
+              if (!clusterState.liveNodesContain(replica.getNodeName())
+                  || !replica.getStr(ZkStateReader.STATE_PROP).equals(
                       ZkStateReader.ACTIVE)) continue;
               if (first) {
                 first = false;
               } else {
                 sliceShardsStr.append('|');
               }
-              String url = coreNodeProps.getCoreUrl();
+              String url = ZkCoreNodeProps.getCoreUrl(replica);
               if (url.startsWith("http://"))
                 url = url.substring(7);
               sliceShardsStr.append(url);
diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
index c350dd5d..d334ffc 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java
@@ -49,9 +49,7 @@
     if (DoubleField.class.isInstance(fieldType) ||
         IntField.class.isInstance(fieldType) ||
         LongField.class.isInstance(fieldType) ||
-        ShortField.class.isInstance(fieldType) ||
         FloatField.class.isInstance(fieldType) ||
-        ByteField.class.isInstance(fieldType) ||
         TrieField.class.isInstance(fieldType) ||
         SortableDoubleField.class.isInstance(fieldType) ||
         SortableIntField.class.isInstance(fieldType) ||
diff --git a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java
index e40fb5e..71b6e03 100644
--- a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java
+++ b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java
@@ -19,15 +19,20 @@
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.params.UpdateParams;
 import org.noggit.JSONParser;
 import org.noggit.ObjectBuilder;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.SolrInputField;
-import org.apache.solr.common.params.*;
 import org.apache.solr.common.util.ContentStream;
 import org.apache.solr.handler.RequestHandlerUtils;
 import org.apache.solr.handler.UpdateRequestHandler;
@@ -512,11 +517,13 @@
         case JSONParser.STRING:
           return parser.getString();
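+        // LONG, NUMBER and BOOLEAN map to native Java types; BIGNUMBER stays a string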
         case JSONParser.LONG:
+          return parser.getLong();
         case JSONParser.NUMBER:
+          return parser.getDouble();
         case JSONParser.BIGNUMBER:
           return parser.getNumberChars().toString();
         case JSONParser.BOOLEAN:
-          return Boolean.toString(parser.getBoolean()); // for legacy reasons, single values s are expected to be strings
+          return parser.getBoolean();
         case JSONParser.NULL:
           parser.getNull();
           return null;
diff --git a/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
index a1623f4..012c9de 100644
--- a/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
@@ -67,6 +67,7 @@
  *       &lt;str name="hl.bs.variant"&gt;&lt;/str&gt;
  *       &lt;str name="hl.bs.type"&gt;SENTENCE&lt;/str&gt;
  *       &lt;int name="hl.maxAnalyzedChars"&gt;10000&lt;/int&gt;
+ *       &lt;str name="hl.multiValuedSeparatorChar"&gt; &lt;/str&gt;
  *     &lt;/lst&gt;
  *   &lt;/requestHandler&gt;
  * </pre>
@@ -96,6 +97,7 @@
  *    <li>hl.bs.country (string) specifies country code for BreakIterator. default is empty string (root locale)
 *    <li>hl.bs.variant (string) specifies variant code for BreakIterator. default is empty string (root locale)
  *    <li>hl.maxAnalyzedChars specifies how many characters at most will be processed in a document.
 *        NOTE: currently hl.maxAnalyzedChars cannot yet be specified per-field
+ *    <li>hl.multiValuedSeparatorChar specifies the logical separator between values for multi-valued fields.
  *  </ul>
  *  
@@ -167,6 +169,15 @@
           String type = params.getFieldParam(field, HighlightParams.BS_TYPE);
           return parseBreakIterator(type, locale);
         }
+
+        @Override
+        protected char getMultiValuedSeparator(String field) {
+          String sep = params.getFieldParam(field, HighlightParams.MULTI_VALUED_SEPARATOR, " ");
+          if (sep.length() != 1) {
+            throw new IllegalArgumentException(HighlightParams.MULTI_VALUED_SEPARATOR + " must be exactly one character.");
+          }
+          return sep.charAt(0);
+        }
       };
       
       Map<String,String[]> snippets = highlighter.highlightFields(fieldNames, query, searcher, docIDs, maxPassages);
diff --git a/solr/core/src/java/org/apache/solr/logging/LogWatcher.java b/solr/core/src/java/org/apache/solr/logging/LogWatcher.java
index 16b5276..d2a23bd 100644
--- a/solr/core/src/java/org/apache/solr/logging/LogWatcher.java
+++ b/solr/core/src/java/org/apache/solr/logging/LogWatcher.java
@@ -137,7 +137,7 @@
       v.size = config.getInt(ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_SIZE, 50);
       v.threshold = config.get(ConfigSolr.CfgProp.SOLR_LOGGING_WATCHER_THRESHOLD, null);
       if (v.size > 0) {
-        log.info("Registering Log Listener");
+        log.info("Registering Log Listener [{}]", logWatcher.getName());
         logWatcher.registerListener(v);
       }
     }
diff --git a/solr/core/src/java/org/apache/solr/request/SolrQueryRequest.java b/solr/core/src/java/org/apache/solr/request/SolrQueryRequest.java
index 73aba30..d045f43 100644
--- a/solr/core/src/java/org/apache/solr/request/SolrQueryRequest.java
+++ b/solr/core/src/java/org/apache/solr/request/SolrQueryRequest.java
@@ -73,6 +73,9 @@
 
   /** The schema snapshot from core.getLatestSchema() at request creation. */
   public IndexSchema getSchema();
+  
+  /** Replaces the current schema snapshot with the latest from the core. */
+  public void updateSchemaToLatest();
 
   /**
    * Returns a string representing all the important parameters.
diff --git a/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java b/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
index 1f093e4..1ad75bb 100644
--- a/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
+++ b/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
@@ -42,8 +42,8 @@
  */
 public abstract class SolrQueryRequestBase implements SolrQueryRequest {
   protected final SolrCore core;
-  protected final IndexSchema schema;
   protected final SolrParams origParams;
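+  // not final: updateSchemaToLatest() may swap in a newer schema during the request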
+  protected volatile IndexSchema schema;
   protected SolrParams params;
   protected Map<Object,Object> context;
   protected Iterable<ContentStream> streams;
@@ -112,6 +112,11 @@
     return schema;
   }
 
+  @Override
+  public void updateSchemaToLatest() {
+    schema = core.getLatestSchema();
+  }
+
   /**
    * Frees resources associated with this request, this method <b>must</b>
    * be called when the object is no longer in use.
diff --git a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
index 0e2b494..b89445a 100755
--- a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
@@ -260,11 +260,9 @@
     KNOWN_TYPES.add(BCDIntField.class);
     KNOWN_TYPES.add(BCDLongField.class);
     KNOWN_TYPES.add(BCDStrField.class);
-    KNOWN_TYPES.add(ByteField.class);
     KNOWN_TYPES.add(DateField.class);
     KNOWN_TYPES.add(DoubleField.class);
     KNOWN_TYPES.add(FloatField.class);
-    KNOWN_TYPES.add(ShortField.class);
     KNOWN_TYPES.add(IntField.class);
     KNOWN_TYPES.add(LongField.class);
     KNOWN_TYPES.add(SortableLongField.class);
diff --git a/solr/core/src/java/org/apache/solr/schema/ByteField.java b/solr/core/src/java/org/apache/solr/schema/ByteField.java
deleted file mode 100644
index 873ce7d..0000000
--- a/solr/core/src/java/org/apache/solr/schema/ByteField.java
+++ /dev/null
@@ -1,98 +0,0 @@
-package org.apache.solr.schema;
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
-import org.apache.lucene.index.GeneralField;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.search.SortField;
-
-import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.QParser;
-
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * A numeric field that can contain 8-bit signed two's complement integer 
- * values, encoded as simple Strings.
- *
- * <p>
- * Field values will sort numerically, but Range Queries (and other features 
- * that rely on numeric ranges) will not work as expected: values will be 
- * evaluated in unicode String order, not numeric order.
- * </p>
- *
- * <ul>
- *  <li>Min Value Allowed: -128</li>
- *  <li>Max Value Allowed: 127</li>
- * </ul>
- *
- * @see Byte
- */
-public class ByteField extends PrimitiveFieldType {
-  @Override
-  protected void init(IndexSchema schema, Map<String, String> args) {
-    super.init(schema, args);
-    restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST);
-  }
-
-  /////////////////////////////////////////////////////////////
-  @Override
-  public SortField getSortField(SchemaField field, boolean reverse) {
-    field.checkSortability();
-    return new SortField(field.name, SortField.Type.BYTE, reverse);
-  }
-
-  @Override
-  public ValueSource getValueSource(SchemaField field, QParser qparser) {
-    field.checkFieldCacheSource(qparser);
-    return new ByteFieldSource(field.name);
-  }
-
-  @Override
-  public void write(TextResponseWriter writer, String name, StorableField f) throws IOException {
-    String s = f.stringValue();
-
-    // these values may be from a legacy lucene index, which may
-    // not be properly formatted in some output formats, or may
-    // incorrectly have a zero length.
-
-    if (s.length()==0) {
-      // zero length value means someone mistakenly indexed the value
-      // instead of simply leaving it out.  Write a null value instead of a numeric.
-      writer.writeNull(name);
-      return;
-    }
-
-    try {
-      byte val = Byte.parseByte(s);
-      writer.writeInt(name, val);
-    } catch (NumberFormatException e){
-      // can't parse - write out the contents as a string so nothing is lost and
-      // clients don't get a parse error.
-      writer.writeStr(name, s, true);
-    }
-  }
-
-  @Override
-  public Byte toObject(StorableField f) {
-    return Byte.valueOf(toExternal(f));
-  }
-}
diff --git a/solr/core/src/java/org/apache/solr/schema/DateField.java b/solr/core/src/java/org/apache/solr/schema/DateField.java
index 1001b35..faf9c4f 100644
--- a/solr/core/src/java/org/apache/solr/schema/DateField.java
+++ b/solr/core/src/java/org/apache/solr/schema/DateField.java
@@ -111,7 +111,7 @@
  * @see <a href="http://www.w3.org/TR/xmlschema-2/#dateTime">XML schema part 2</a>
 * @deprecated {@link TrieDateField} is recommended for all new schemas
  */
-public class DateField extends PrimitiveFieldType {
+public class DateField extends PrimitiveFieldType implements DateValueFieldType {
 
   public static TimeZone UTC = TimeZone.getTimeZone("UTC");
 
diff --git a/solr/core/src/java/org/apache/solr/schema/DateValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/DateValueFieldType.java
new file mode 100644
index 0000000..c4e7984
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/DateValueFieldType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for Date-valued field types.
+ */
+public interface DateValueFieldType {
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/DoubleField.java b/solr/core/src/java/org/apache/solr/schema/DoubleField.java
index 5d2968f..550adae 100644
--- a/solr/core/src/java/org/apache/solr/schema/DoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/DoubleField.java
@@ -17,18 +17,20 @@
 
 package org.apache.solr.schema;
 
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
-import org.apache.lucene.index.GeneralField;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.search.SortField;
-import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.QParser;
-
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
+import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.response.TextResponseWriter;
+import org.apache.solr.search.QParser;
+
 /**
  * A legacy numeric field type that encodes "Double" values as simple Strings.
  * This class should not be used except by people with existing indexes that
@@ -43,7 +45,21 @@
  * 
  * @see TrieDoubleField
  */
-public class DoubleField extends PrimitiveFieldType {
+public class DoubleField extends PrimitiveFieldType implements DoubleValueFieldType {
+
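+  // legacy values are indexed as plain strings, so parse terms directly rather than
+  // relying on the default FieldCache parsers, which expect numeric-encoded terms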
+  private static final FieldCache.DoubleParser PARSER = new FieldCache.DoubleParser() {
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+
+    @Override
+    public double parseDouble(BytesRef term) {
+      return Double.parseDouble(term.utf8ToString());
+    }
+  };
+
   @Override
   protected void init(IndexSchema schema, Map<String, String> args) {
     super.init(schema, args);
@@ -54,13 +70,13 @@
   @Override
   public SortField getSortField(SchemaField field, boolean reverse) {
     field.checkSortability();
-    return new SortField(field.name, SortField.Type.DOUBLE, reverse);
+    return new SortField(field.name, PARSER, reverse);
   }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
-    return new DoubleFieldSource(field.name);
+    return new DoubleFieldSource(field.name, PARSER);
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/DoubleValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/DoubleValueFieldType.java
new file mode 100644
index 0000000..ff9712e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/DoubleValueFieldType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for double-valued field types.
+ */
+public interface DoubleValueFieldType extends NumericValueFieldType {
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index 8acddaa..5d03d38 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -17,18 +17,6 @@
 
 package org.apache.solr.schema;
 
-import java.io.IOException;
-import java.io.Reader;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.lucene.analysis.util.AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM; 
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -67,6 +55,18 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.io.Reader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.lucene.analysis.util.AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM;
+
 /**
  * Base class for all field types used by an index schema.
  *
@@ -799,6 +799,15 @@
       namedPropertyValues.add(getPropertyName(TOKENIZED), isTokenized());
       // The BINARY property is always false
       // namedPropertyValues.add(getPropertyName(BINARY), hasProperty(BINARY));
+      if (null != getSimilarityFactory()) {
+        namedPropertyValues.add(SIMILARITY, getSimilarityFactory().getNamedPropertyValues());
+      }
+      if (null != getPostingsFormat()) {
+        namedPropertyValues.add(POSTINGS_FORMAT, getPostingsFormat());
+      }
+      if (null != getDocValuesFormat()) {
+        namedPropertyValues.add(DOC_VALUES_FORMAT, getDocValuesFormat());
+      }
     } else { // Don't show defaults
       Set<String> fieldProperties = new HashSet<String>();
       for (String propertyName : FieldProperties.propertyNames) {
@@ -826,15 +835,7 @@
         namedPropertyValues.add(MULTI_TERM_ANALYZER, getAnalyzerProperties(((TextField) this).getMultiTermAnalyzer()));
       }
     }
-    if (null != getSimilarityFactory()) {
-      namedPropertyValues.add(SIMILARITY, getSimilarityFactory().getNamedPropertyValues());
-    }
-    if (null != getPostingsFormat()) {
-      namedPropertyValues.add(POSTINGS_FORMAT, getPostingsFormat());
-    }
-    if (null != getDocValuesFormat()) {
-      namedPropertyValues.add(DOC_VALUES_FORMAT, getDocValuesFormat());
-    }
+
     return namedPropertyValues;
   }
 
diff --git a/solr/core/src/java/org/apache/solr/schema/FloatField.java b/solr/core/src/java/org/apache/solr/schema/FloatField.java
index 75cf087..7e23443 100644
--- a/solr/core/src/java/org/apache/solr/schema/FloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/FloatField.java
@@ -19,11 +19,15 @@
 
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
 import org.apache.solr.search.QParser;
 import org.apache.lucene.index.GeneralField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.solr.response.TextResponseWriter;
 
 import java.util.Map;
@@ -42,7 +46,21 @@
  * 
  * @see TrieFloatField
  */
-public class FloatField extends PrimitiveFieldType {
+public class FloatField extends PrimitiveFieldType implements FloatValueFieldType {
+
+  private static final FieldCache.FloatParser PARSER = new FieldCache.FloatParser() {
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+
+    @Override
+    public float parseFloat(BytesRef term) {
+      return Float.parseFloat(term.utf8ToString());
+    }
+  };
+
   @Override
   protected void init(IndexSchema schema, Map<String,String> args) {
     super.init(schema, args);
@@ -58,7 +76,7 @@
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
-    return new FloatFieldSource(field.name);
+    return new FloatFieldSource(field.name, PARSER);
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/FloatValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/FloatValueFieldType.java
new file mode 100644
index 0000000..5606caf
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/FloatValueFieldType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for float-valued field types.
+ */
+public interface FloatValueFieldType extends NumericValueFieldType {
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index ad0422d..fbd5c70 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -311,8 +311,8 @@
    */
   public SchemaField getUniqueKeyField() { return uniqueKeyField; }
 
-  private String uniqueKeyFieldName;
-  private FieldType uniqueKeyFieldType;
+  protected String uniqueKeyFieldName;
+  protected FieldType uniqueKeyFieldType;
 
   /**
    * The raw (field type encoded) value of the Unique Key field for
diff --git a/solr/core/src/java/org/apache/solr/schema/IntField.java b/solr/core/src/java/org/apache/solr/schema/IntField.java
index 3c9320c..2b14867 100644
--- a/solr/core/src/java/org/apache/solr/schema/IntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/IntField.java
@@ -19,11 +19,15 @@
 
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
 import org.apache.solr.search.QParser;
 import org.apache.lucene.index.GeneralField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.solr.response.TextResponseWriter;
 
 import java.util.Map;
@@ -42,7 +46,21 @@
  * 
  * @see TrieIntField
  */
-public class IntField extends PrimitiveFieldType {
+public class IntField extends PrimitiveFieldType implements IntValueFieldType {
+
+  private static final FieldCache.IntParser PARSER = new FieldCache.IntParser() {
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+
+    @Override
+    public int parseInt(BytesRef term) {
+      return Integer.parseInt(term.utf8ToString());
+    }
+  };
+
   @Override
   protected void init(IndexSchema schema, Map<String,String> args) {
     super.init(schema, args);
@@ -52,13 +70,13 @@
   @Override
   public SortField getSortField(SchemaField field,boolean reverse) {
     field.checkSortability();
-    return new SortField(field.name,SortField.Type.INT, reverse);
+    return new SortField(field.name, PARSER, reverse);
   }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
-    return new IntFieldSource(field.name);
+    return new IntFieldSource(field.name, PARSER);
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/IntValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/IntValueFieldType.java
new file mode 100644
index 0000000..9cf81e1
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/IntValueFieldType.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for int-valued field types.
+ */
+public interface IntValueFieldType extends NumericValueFieldType {
+}
+
diff --git a/solr/core/src/java/org/apache/solr/schema/LongField.java b/solr/core/src/java/org/apache/solr/schema/LongField.java
index 9f795a7..5b18db8 100644
--- a/solr/core/src/java/org/apache/solr/schema/LongField.java
+++ b/solr/core/src/java/org/apache/solr/schema/LongField.java
@@ -22,7 +22,11 @@
 import org.apache.lucene.index.GeneralField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
 
@@ -42,7 +46,21 @@
  * 
  * @see TrieLongField
  */
-public class LongField extends PrimitiveFieldType {
+public class LongField extends PrimitiveFieldType implements LongValueFieldType {
+
+  private static final FieldCache.LongParser PARSER = new FieldCache.LongParser() {
+    
+    @Override
+    public TermsEnum termsEnum(Terms terms) throws IOException {
+      return terms.iterator(null);
+    }
+
+    @Override
+    public long parseLong(BytesRef term) {
+      return Long.parseLong(term.utf8ToString());
+    }
+  };
+
   @Override
   protected void init(IndexSchema schema, Map<String,String> args) {
     super.init(schema, args);
@@ -54,13 +72,13 @@
   @Override
   public SortField getSortField(SchemaField field,boolean reverse) {
     field.checkSortability();
-    return new SortField(field.name,SortField.Type.LONG, reverse);
+    return new SortField(field.name, PARSER, reverse);
   }
 
   @Override
   public ValueSource getValueSource(SchemaField field, QParser qparser) {
     field.checkFieldCacheSource(qparser);
-    return new LongFieldSource(field.name);
+    return new LongFieldSource(field.name, PARSER);
   }
 
   @Override
diff --git a/solr/core/src/java/org/apache/solr/schema/LongValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/LongValueFieldType.java
new file mode 100644
index 0000000..55b8b51
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/LongValueFieldType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for long-valued field types.
+ */
+public interface LongValueFieldType extends NumericValueFieldType {
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
index a505f83..f3fa92c 100644
--- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
@@ -169,6 +169,12 @@
     return addFields(Arrays.asList(newField));
   }
 
+  public class FieldExistsException extends SolrException {
+    public FieldExistsException(ErrorCode code, String msg) {
+      super(code, msg);
+    }
+  }
+  
   @Override
   public ManagedIndexSchema addFields(Collection<SchemaField> newFields) {
     ManagedIndexSchema newSchema = null;
@@ -183,7 +189,7 @@
           for (SchemaField newField : newFields) {
             if (null != newSchema.getFieldOrNull(newField.getName())) {
               String msg = "Field '" + newField.getName() + "' already exists.";
-              throw new SolrException(ErrorCode.BAD_REQUEST, msg);
+              throw new FieldExistsException(ErrorCode.BAD_REQUEST, msg);
             }
             newSchema.fields.put(newField.getName(), newField);
 
@@ -328,6 +334,8 @@
     newSchema.similarityFactory = similarityFactory;
     newSchema.isExplicitSimilarity = isExplicitSimilarity;
     newSchema.uniqueKeyField = uniqueKeyField;
+    newSchema.uniqueKeyFieldName = uniqueKeyFieldName;
+    newSchema.uniqueKeyFieldType = uniqueKeyFieldType;
 
     if (includeFieldDataStructures) {
       // These need new collections, since addFields() can add members to them
diff --git a/solr/core/src/java/org/apache/solr/schema/NumericValueFieldType.java b/solr/core/src/java/org/apache/solr/schema/NumericValueFieldType.java
new file mode 100644
index 0000000..e223829
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/NumericValueFieldType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+/**
+ * Marker interface for numeric-valued field types.
+ */
+public interface NumericValueFieldType {
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/ShortField.java b/solr/core/src/java/org/apache/solr/schema/ShortField.java
deleted file mode 100644
index 7845609..0000000
--- a/solr/core/src/java/org/apache/solr/schema/ShortField.java
+++ /dev/null
@@ -1,101 +0,0 @@
-package org.apache.solr.schema;
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.queries.function.ValueSource;
-import org.apache.lucene.queries.function.valuesource.ShortFieldSource;
-import org.apache.lucene.index.GeneralField;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.StorableField;
-import org.apache.lucene.search.SortField;
-
-import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.QParser;
-
-import java.io.IOException;
-import java.util.Map;
-
-
-/**
- * A numeric field that can contain 16-bit signed two's complement integer
- * values, encoded as simple Strings.
- *
- * <p>
- * Field values will sort numerically, but Range Queries (and other features 
- * that rely on numeric ranges) will not work as expected: values will be 
- * evaluated in unicode String order, not numeric order.
- * </p>
- *
- * <ul>
- *  <li>Min Value Allowed: -32768</li>
- *  <li>Max Value Allowed: 32767</li>
- * </ul>
- *
- * @see Short
- **/
-public class ShortField extends PrimitiveFieldType {
-  @Override
-  protected void init(IndexSchema schema, Map<String, String> args) {
-    super.init(schema, args);
-    restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST);
-  }
-
-  /////////////////////////////////////////////////////////////
-
-  @Override
-  public SortField getSortField(SchemaField field, boolean reverse) {
-    field.checkSortability();
-    return new SortField(field.name, SortField.Type.SHORT, reverse);
-  }
-
-  @Override
-  public ValueSource getValueSource(SchemaField field, QParser qparser) {
-    field.checkFieldCacheSource(qparser);
-    return new ShortFieldSource(field.name);
-  }
-
-  @Override
-  public void write(TextResponseWriter writer, String name, StorableField f) throws IOException {
-    String s = f.stringValue();
-
-    // these values may be from a legacy lucene index, which may
-    // not be properly formatted in some output formats, or may
-    // incorrectly have a zero length.
-
-    if (s.length()==0) {
-      // zero length value means someone mistakenly indexed the value
-      // instead of simply leaving it out.  Write a null value instead of a numeric.
-      writer.writeNull(name);
-      return;
-    }
-
-    try {
-      short val = Short.parseShort(s);
-      writer.writeInt(name, val);
-    } catch (NumberFormatException e){
-      // can't parse - write out the contents as a string so nothing is lost and
-      // clients don't get a parse error.
-      writer.writeStr(name, s, true);
-    }
-  }
-
-  @Override
-  public Short toObject(StorableField f) {
-    return Short.valueOf(toExternal(f));
-  }
-
-}
diff --git a/solr/core/src/java/org/apache/solr/schema/SortableDoubleField.java b/solr/core/src/java/org/apache/solr/schema/SortableDoubleField.java
index 65436f4..2f4b0a6 100644
--- a/solr/core/src/java/org/apache/solr/schema/SortableDoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SortableDoubleField.java
@@ -54,7 +54,7 @@
  * @deprecated use {@link DoubleField} or {@link TrieDoubleField} - will be removed in 5.x
  */
 @Deprecated
-public class SortableDoubleField extends PrimitiveFieldType {
+public class SortableDoubleField extends PrimitiveFieldType implements DoubleValueFieldType {
   @Override
   public SortField getSortField(SchemaField field,boolean reverse) {
     return getStringSort(field,reverse);
diff --git a/solr/core/src/java/org/apache/solr/schema/SortableFloatField.java b/solr/core/src/java/org/apache/solr/schema/SortableFloatField.java
index 69db761..e66e255 100644
--- a/solr/core/src/java/org/apache/solr/schema/SortableFloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SortableFloatField.java
@@ -55,7 +55,7 @@
  * @deprecated use {@link FloatField} or {@link TrieFloatField} - will be removed in 5.x
  */
 @Deprecated
-public class SortableFloatField extends PrimitiveFieldType {
+public class SortableFloatField extends PrimitiveFieldType implements FloatValueFieldType {
   @Override
   public SortField getSortField(SchemaField field,boolean reverse) {
     return getStringSort(field,reverse);
diff --git a/solr/core/src/java/org/apache/solr/schema/SortableIntField.java b/solr/core/src/java/org/apache/solr/schema/SortableIntField.java
index cbcb913..9558573 100644
--- a/solr/core/src/java/org/apache/solr/schema/SortableIntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SortableIntField.java
@@ -55,7 +55,7 @@
  * @deprecated use {@link IntField} or {@link TrieIntField} - will be removed in 5.x
  */
 @Deprecated
-public class SortableIntField extends PrimitiveFieldType {
+public class SortableIntField extends PrimitiveFieldType implements IntValueFieldType {
   @Override
   public SortField getSortField(SchemaField field,boolean reverse) {
     return getStringSort(field,reverse);
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
index 9012e54..0a652ef 100755
--- a/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDateField.java
@@ -55,7 +55,7 @@
  * @see DateField
  * @see TrieField
  */
-public class TrieDateField extends DateField {
+public class TrieDateField extends DateField implements DateValueFieldType {
 
   final TrieField wrappedField = new TrieField() {{
     type = TrieTypes.DATE;
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
index a8884b4..1f0da8a 100755
--- a/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieDoubleField.java
@@ -33,7 +33,7 @@
  * @see Double
  * @see <a href="http://java.sun.com/docs/books/jls/third_edition/html/typesValues.html#4.2.3">Java Language Specification, s4.2.3</a>
  */
-public class TrieDoubleField extends TrieField {
+public class TrieDoubleField extends TrieField implements DoubleValueFieldType {
   {
     type=TrieTypes.DOUBLE;
   }
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
index 2ea1d14..1163d72 100755
--- a/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieFloatField.java
@@ -33,7 +33,7 @@
  * @see Float
  * @see <a href="http://java.sun.com/docs/books/jls/third_edition/html/typesValues.html#4.2.3">Java Language Specification, s4.2.3</a>
  */
-public class TrieFloatField extends TrieField {
+public class TrieFloatField extends TrieField implements FloatValueFieldType {
   {
     type=TrieTypes.FLOAT;
   }
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
index 4cc2999..e49f59a 100755
--- a/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieIntField.java
@@ -27,7 +27,7 @@
  * 
  * @see Integer
  */
-public class TrieIntField extends TrieField {
+public class TrieIntField extends TrieField implements IntValueFieldType {
   {
     type=TrieTypes.INTEGER;
   }
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
index c20f252..052e4a7 100755
--- a/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieLongField.java
@@ -27,7 +27,7 @@
  * 
  * @see Long
  */
-public class TrieLongField extends TrieField {
+public class TrieLongField extends TrieField implements LongValueFieldType {
   {
     type=TrieTypes.LONG;
   }
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index b253747..24e4290 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -193,7 +193,13 @@
     this.name = "Searcher@" + Integer.toHexString(hashCode()) + (name!=null ? " "+name : "");
     log.info("Opening " + this.name);
 
-    Directory dir = this.reader.directory();
+    if (directoryFactory.searchersReserveCommitPoints()) {
+      // reserve commit point for life of searcher
+      core.getDeletionPolicy().saveCommitPoint(
+          reader.getIndexCommit().getGeneration());
+    }
+    
+    Directory dir = getIndexReader().directory();
     
     this.reserveDirectory = reserveDirectory;
     this.createdDirectory = r == null;
@@ -331,12 +337,18 @@
     // super.close();
     // can't use super.close() since it just calls reader.close() and that may only be called once
     // per reader (even if incRef() was previously called).
+    
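+    // remember the commit generation so the reserved commit point can be released below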
+    long cpg = reader.getIndexCommit().getGeneration();
     try {
       if (closeReader) reader.decRef();
     } catch (Throwable t) {
       SolrException.log(log, "Problem dec ref'ing reader", t);
     }
 
+    if (directoryFactory.searchersReserveCommitPoints()) {
+      core.getDeletionPolicy().releaseCommitPoint(cpg);
+    }
+
     for (SolrCache cache : cacheList) {
       cache.close();
     }
@@ -1095,41 +1107,12 @@
     DocSetCollector collector = new DocSetCollector(maxDoc()>>6, maxDoc());
 
     if (filter==null) {
-      if (query instanceof TermQuery) {
-        Term t = ((TermQuery)query).getTerm();
-        for (final AtomicReaderContext leaf : leafContexts) {
-          final AtomicReader reader = leaf.reader();
-          collector.setNextReader(leaf);
-          Fields fields = reader.fields();
-          Terms terms = fields.terms(t.field());
-          BytesRef termBytes = t.bytes();
-          
-          Bits liveDocs = reader.getLiveDocs();
-          DocsEnum docsEnum = null;
-          if (terms != null) {
-            final TermsEnum termsEnum = terms.iterator(null);
-            if (termsEnum.seekExact(termBytes, false)) {
-              docsEnum = termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE);
-            }
-          }
-
-          if (docsEnum != null) {
-            int docid;
-            while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-              collector.collect(docid);
-            }
-          }
-        }
-      } else {
-        super.search(query,null,collector);
-      }
-      return collector.getDocSet();
-
+      super.search(query,null,collector);
     } else {
       Filter luceneFilter = filter.getTopFilter();
       super.search(query, luceneFilter, collector);
-      return collector.getDocSet();
     }
+    return collector.getDocSet();
   }
 
 
diff --git a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
index b0d7841..e426b8b 100755
--- a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
@@ -293,7 +293,7 @@
       }
     });
 
-    addParser("geodist", HaversineConstFunction.parser);
+    addParser("geodist", new GeoDistValueSourceParser());
 
     addParser("hsin", new ValueSourceParser() {
       @Override
@@ -309,18 +309,8 @@
         ValueSource one = fp.parseValueSource();
         ValueSource two = fp.parseValueSource();
         if (fp.hasMoreArguments()) {
-
-
-          List<ValueSource> s1 = new ArrayList<ValueSource>();
-          s1.add(one);
-          s1.add(two);
-          pv1 = new VectorValueSource(s1);
-          ValueSource x2 = fp.parseValueSource();
-          ValueSource y2 = fp.parseValueSource();
-          List<ValueSource> s2 = new ArrayList<ValueSource>();
-          s2.add(x2);
-          s2.add(y2);
-          pv2 = new VectorValueSource(s2);
+          pv1 = new VectorValueSource(Arrays.asList(one, two)); // x1, y1
+          pv2 = new VectorValueSource(Arrays.asList(fp.parseValueSource(), fp.parseValueSource())); // x2, y2
         } else {
           //check to see if we have multiValue source
           if (one instanceof MultiValueSource && two instanceof MultiValueSource){
diff --git a/solr/core/src/java/org/apache/solr/search/function/distance/GeoDistValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/function/distance/GeoDistValueSourceParser.java
new file mode 100644
index 0000000..789980a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/function/distance/GeoDistValueSourceParser.java
@@ -0,0 +1,165 @@
+package org.apache.solr.search.function.distance;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.io.ParseUtils;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.ConstNumberSource;
+import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
+import org.apache.lucene.queries.function.valuesource.MultiValueSource;
+import org.apache.lucene.queries.function.valuesource.VectorValueSource;
+import org.apache.solr.common.params.SpatialParams;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.FunctionQParser;
+import org.apache.solr.search.SyntaxError;
+import org.apache.solr.search.ValueSourceParser;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Parses "geodist" creating {@link HaversineConstFunction} or {@link HaversineFunction}.
+ */
+public class GeoDistValueSourceParser extends ValueSourceParser {
+
+  @Override
+  public ValueSource parse(FunctionQParser fp) throws SyntaxError {
+    // TODO: dispatch through SpatialQueryable in the future?
+    List<ValueSource> sources = fp.parseValueSourceList();
+
+    // "m" is a multi-value source, "x" is a single-value source
+    // allow (m,m) (m,x,x) (x,x,m) (x,x,x,x)
+    // if not enough points are present, "pt" will be checked first, followed by "sfield".
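+    // e.g. geodist(store, 40.27, -74.0) (where "store" is some location field) is the (m,x,x)
+    // form, while a bare geodist() falls back entirely to the pt/sfield params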
+
+    MultiValueSource mv1 = null;
+    MultiValueSource mv2 = null;
+
+    if (sources.size() == 0) {
+      // nothing to do now
+    } else if (sources.size() == 1) {
+      ValueSource vs = sources.get(0);
+      if (!(vs instanceof MultiValueSource)) {
+        throw new SyntaxError("geodist - invalid parameters:" + sources);
+      }
+      mv1 = (MultiValueSource)vs;
+    } else if (sources.size() == 2) {
+      ValueSource vs1 = sources.get(0);
+      ValueSource vs2 = sources.get(1);
+
+      if (vs1 instanceof MultiValueSource && vs2 instanceof MultiValueSource) {
+        mv1 = (MultiValueSource)vs1;
+        mv2 = (MultiValueSource)vs2;
+      } else {
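+        // (x,x): treat the two single-value sources as one lat/lon point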
+        mv1 = makeMV(sources, sources);
+      }
+    } else if (sources.size()==3) {
+      ValueSource vs1 = sources.get(0);
+      ValueSource vs2 = sources.get(1);
+      if (vs1 instanceof MultiValueSource) {     // (m,x,x)
+        mv1 = (MultiValueSource)vs1;
+        mv2 = makeMV(sources.subList(1, 3), sources);
+      } else {                                   // (x,x,m)
+        mv1 = makeMV(sources.subList(0, 2), sources);
+        vs1 = sources.get(2);
+        if (!(vs1 instanceof MultiValueSource)) {
+          throw new SyntaxError("geodist - invalid parameters:" + sources);
+        }
+        mv2 = (MultiValueSource)vs1;
+      }
+    } else if (sources.size()==4) {
+      mv1 = makeMV(sources.subList(0, 2), sources);
+      mv2 = makeMV(sources.subList(2, 4), sources);
+    } else if (sources.size() > 4) {
+      throw new SyntaxError("geodist - invalid parameters:" + sources);
+    }
+
+    if (mv1 == null) {
+      mv1 = parsePoint(fp);
+      mv2 = parseSfield(fp);
+    } else if (mv2 == null) {
+      mv2 = parsePoint(fp);
+      if (mv2 == null)
+        mv2 = parseSfield(fp);
+    }
+
+    if (mv1 == null || mv2 == null) {
+      throw new SyntaxError("geodist - not enough parameters:" + sources);
+    }
+
+    // We have all the parameters at this point, now check if one of the points is constant
+    double[] constants;
+    constants = getConstants(mv1);
+    MultiValueSource other = mv2;
+    if (constants == null) {
+      constants = getConstants(mv2);
+      other = mv1;
+    }
+
+    if (constants != null && other instanceof VectorValueSource) {
+      return new HaversineConstFunction(constants[0], constants[1], (VectorValueSource)other);
+    }
+
+    return new HaversineFunction(mv1, mv2, DistanceUtils.EARTH_MEAN_RADIUS_KM, true);
+  }
+
+  /** Make a MultiValueSource from two non-MultiValueSources. */
+  private VectorValueSource makeMV(List<ValueSource> sources, List<ValueSource> orig) throws SyntaxError {
+    ValueSource vs1 = sources.get(0);
+    ValueSource vs2 = sources.get(1);
+
+    if (vs1 instanceof MultiValueSource || vs2 instanceof MultiValueSource) {
+      throw new SyntaxError("geodist - invalid parameters:" + orig);
+    }
+    return new VectorValueSource(sources);
+  }
+
+  private MultiValueSource parsePoint(FunctionQParser fp) throws SyntaxError {
+    String pt = fp.getParam(SpatialParams.POINT);
+    if (pt == null) return null;
+    double[] point = null;
+    try {
+      point = ParseUtils.parseLatitudeLongitude(pt);
+    } catch (InvalidShapeException e) {
+      throw new SyntaxError("Bad spatial pt:" + pt);
+    }
+    return new VectorValueSource(Arrays.<ValueSource>asList(new DoubleConstValueSource(point[0]), new DoubleConstValueSource(point[1])));
+  }
+
+  private double[] getConstants(MultiValueSource vs) {
+    if (!(vs instanceof VectorValueSource)) return null;
+    List<ValueSource> sources = ((VectorValueSource)vs).getSources();
+    if (sources.get(0) instanceof ConstNumberSource && sources.get(1) instanceof ConstNumberSource) {
+      return new double[] { ((ConstNumberSource) sources.get(0)).getDouble(), ((ConstNumberSource) sources.get(1)).getDouble()};
+    }
+    return null;
+  }
+
+  private MultiValueSource parseSfield(FunctionQParser fp) throws SyntaxError {
+    String sfield = fp.getParam(SpatialParams.FIELD);
+    if (sfield == null) return null;
+    SchemaField sf = fp.getReq().getSchema().getField(sfield);
+    ValueSource vs = sf.getType().getValueSource(sf, fp);
+    if (!(vs instanceof MultiValueSource)) {
+      throw new SyntaxError("Spatial field must implement MultiValueSource:" + sf);
+    }
+    return (MultiValueSource)vs;
+  }
+
+}
diff --git a/solr/core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java b/solr/core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
index b9efc02..3352720 100755
--- a/solr/core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
+++ b/solr/core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
@@ -16,27 +16,15 @@
  * limitations under the License.
  */
 
+import com.spatial4j.core.distance.DistanceUtils;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
-import org.apache.lucene.queries.function.valuesource.ConstNumberSource;
-import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
-import org.apache.lucene.queries.function.valuesource.MultiValueSource;
 import org.apache.lucene.queries.function.valuesource.VectorValueSource;
 import org.apache.lucene.search.IndexSearcher;
-import com.spatial4j.core.io.ParseUtils;
-import com.spatial4j.core.distance.DistanceUtils;
-import com.spatial4j.core.exception.InvalidShapeException;
-import org.apache.solr.common.params.SpatialParams;
-import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.FunctionQParser;
-import org.apache.solr.search.SyntaxError;
-import org.apache.solr.search.ValueSourceParser;
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Map;
 
 import static com.spatial4j.core.distance.DistanceUtils.DEGREES_TO_RADIANS;
@@ -46,135 +34,6 @@
  */
 public class HaversineConstFunction extends ValueSource {
 
-  public static ValueSourceParser parser = new ValueSourceParser() {
-    @Override
-    public ValueSource parse(FunctionQParser fp) throws SyntaxError
-    {
-      // TODO: dispatch through SpatialQueryable in the future?
-      List<ValueSource> sources = fp.parseValueSourceList();
-
-      // "m" is a multi-value source, "x" is a single-value source
-      // allow (m,m) (m,x,x) (x,x,m) (x,x,x,x)
-      // if not enough points are present, "pt" will be checked first, followed by "sfield".      
-
-      MultiValueSource mv1 = null;
-      MultiValueSource mv2 = null;
-
-      if (sources.size() == 0) {
-        // nothing to do now
-      } else if (sources.size() == 1) {
-        ValueSource vs = sources.get(0);
-        if (!(vs instanceof MultiValueSource)) {
-          throw new SyntaxError("geodist - invalid parameters:" + sources);
-        }
-        mv1 = (MultiValueSource)vs;
-      } else if (sources.size() == 2) {
-        ValueSource vs1 = sources.get(0);
-        ValueSource vs2 = sources.get(1);
-
-        if (vs1 instanceof MultiValueSource && vs2 instanceof MultiValueSource) {
-          mv1 = (MultiValueSource)vs1;
-          mv2 = (MultiValueSource)vs2;
-        } else {
-          mv1 = makeMV(sources, sources);
-        }
-      } else if (sources.size()==3) {
-        ValueSource vs1 = sources.get(0);
-        ValueSource vs2 = sources.get(1);
-        if (vs1 instanceof MultiValueSource) {     // (m,x,x)
-          mv1 = (MultiValueSource)vs1;
-          mv2 = makeMV(sources.subList(1,3), sources);
-        } else {                                   // (x,x,m)
-          mv1 = makeMV(sources.subList(0,2), sources);
-          vs1 = sources.get(2);
-          if (!(vs1 instanceof MultiValueSource)) {
-            throw new SyntaxError("geodist - invalid parameters:" + sources);
-          }
-          mv2 = (MultiValueSource)vs1;
-        }
-      } else if (sources.size()==4) {
-        mv1 = makeMV(sources.subList(0,2), sources);
-        mv2 = makeMV(sources.subList(2,4), sources);
-      } else if (sources.size() > 4) {
-        throw new SyntaxError("geodist - invalid parameters:" + sources);
-      }
-
-      if (mv1 == null) {
-        mv1 = parsePoint(fp);
-        mv2 = parseSfield(fp);
-      } else if (mv2 == null) {
-        mv2 = parsePoint(fp);
-        if (mv2 == null)
-          mv2 = parseSfield(fp);
-      }
-
-      if (mv1 == null || mv2 == null) {
-        throw new SyntaxError("geodist - not enough parameters:" + sources);
-      }
-
-      // We have all the parameters at this point, now check if one of the points is constant
-      double[] constants;
-      constants = getConstants(mv1);
-      MultiValueSource other = mv2;
-      if (constants == null) {
-        constants = getConstants(mv2);
-        other = mv1;
-      }
-
-      if (constants != null && other instanceof VectorValueSource) {
-        return new HaversineConstFunction(constants[0], constants[1], (VectorValueSource)other);
-      }      
-
-      return new HaversineFunction(mv1, mv2, DistanceUtils.EARTH_MEAN_RADIUS_KM, true);
-    }
-  };
-
-  /** make a MultiValueSource from two non MultiValueSources */
-  private static VectorValueSource makeMV(List<ValueSource> sources, List<ValueSource> orig) throws SyntaxError {
-    ValueSource vs1 = sources.get(0);
-    ValueSource vs2 = sources.get(1);
-
-    if (vs1 instanceof MultiValueSource || vs2 instanceof MultiValueSource) {
-      throw new SyntaxError("geodist - invalid parameters:" + orig);
-    }
-    return  new VectorValueSource(sources);
-  }
-
-  private static MultiValueSource parsePoint(FunctionQParser fp) throws SyntaxError {
-    String pt = fp.getParam(SpatialParams.POINT);
-    if (pt == null) return null;
-    double[] point = null;
-    try {
-      point = ParseUtils.parseLatitudeLongitude(pt);
-    } catch (InvalidShapeException e) {
-      throw new SyntaxError("Bad spatial pt:" + pt);
-    }
-    return new VectorValueSource(Arrays.<ValueSource>asList(new DoubleConstValueSource(point[0]),new DoubleConstValueSource(point[1])));
-  }
-
-  private static double[] getConstants(MultiValueSource vs) {
-    if (!(vs instanceof VectorValueSource)) return null;
-    List<ValueSource> sources = ((VectorValueSource)vs).getSources();
-    if (sources.get(0) instanceof ConstNumberSource && sources.get(1) instanceof ConstNumberSource) {
-      return new double[] { ((ConstNumberSource) sources.get(0)).getDouble(), ((ConstNumberSource) sources.get(1)).getDouble()};
-    }
-    return null;
-  }
-
-  private static MultiValueSource parseSfield(FunctionQParser fp) throws SyntaxError {
-    String sfield = fp.getParam(SpatialParams.FIELD);
-    if (sfield == null) return null;
-    SchemaField sf = fp.getReq().getSchema().getField(sfield);
-    ValueSource vs = sf.getType().getValueSource(sf, fp);
-    if (!(vs instanceof MultiValueSource)) {
-      throw new SyntaxError("Spatial field must implement MultiValueSource:" + sf);
-    }
-    return (MultiValueSource)vs;
-  }
-
-
-  //////////////////////////////////////////////////////////////////////////////////////
-
   private final double latCenter;
   private final double lonCenter;
   private final VectorValueSource p2;  // lat+lon, just saved for display/debugging
@@ -184,7 +43,6 @@
   private final double latCenterRad_cos; // cos(latCenter)
   private static final double EARTH_MEAN_DIAMETER = DistanceUtils.EARTH_MEAN_RADIUS_KM * 2;
 
-
   public HaversineConstFunction(double latCenter, double lonCenter, VectorValueSource vs) {
     this.latCenter = latCenter;
     this.lonCenter = lonCenter;
@@ -257,4 +115,5 @@
   public String description() {
     return name() + '(' + p2 + ',' + latCenter + ',' + lonCenter + ')';
   }
+
 }
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
index aaab3d0..8dea1d3 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java
@@ -20,8 +20,11 @@
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.grouping.SearchGroup;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.handler.component.ResponseBuilder;
 import org.apache.solr.handler.component.ShardRequest;
 import org.apache.solr.handler.component.ShardResponse;
@@ -31,6 +34,8 @@
 import org.apache.solr.search.grouping.distributed.shardresultserializer.SearchGroupsResultTransformer;
 
 import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.*;
 
 /**
@@ -61,7 +66,38 @@
     try {
       int maxElapsedTime = 0;
       int hitCountDuringFirstPhase = 0;
+
+      NamedList<Object> shardInfo = null;
+      if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+        shardInfo = new SimpleOrderedMap<Object>();
+        rb.rsp.getValues().add(ShardParams.SHARDS_INFO + ".firstPhase", shardInfo);
+      }
+
       for (ShardResponse srsp : shardRequest.responses) {
+        if (shardInfo != null) {
+          SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
+
+          if (srsp.getException() != null) {
+            Throwable t = srsp.getException();
+            if (t instanceof SolrServerException) {
+              t = ((SolrServerException) t).getCause();
+            }
+            nl.add("error", t.toString());
+            StringWriter trace = new StringWriter();
+            t.printStackTrace(new PrintWriter(trace));
+            nl.add("trace", trace.toString());
+          } else {
+            nl.add("numFound", (Integer) srsp.getSolrResponse().getResponse().get("totalHitCount"));
+          }
+          if (srsp.getSolrResponse() != null) {
+            nl.add("time", srsp.getSolrResponse().getElapsedTime());
+          }
+
+          shardInfo.add(srsp.getShard(), nl);
+        }
+        if (rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false) && srsp.getException() != null) {
+          continue; // skip this shard's response: it errored and we're tolerant
+        }
         maxElapsedTime = (int) Math.max(maxElapsedTime, srsp.getSolrResponse().getElapsedTime());
         @SuppressWarnings("unchecked")
         NamedList<NamedList> firstPhaseResult = (NamedList<NamedList>) srsp.getSolrResponse().getResponse().get("firstPhase");
diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
index 46c1650..1f41b07 100644
--- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java
@@ -23,8 +23,11 @@
 import org.apache.lucene.search.grouping.GroupDocs;
 import org.apache.lucene.search.grouping.TopGroups;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.ShardParams;
 import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.handler.component.ResponseBuilder;
 import org.apache.solr.handler.component.ShardDoc;
 import org.apache.solr.handler.component.ShardRequest;
@@ -35,6 +38,8 @@
 import org.apache.solr.search.grouping.distributed.shardresultserializer.TopGroupsResultTransformer;
 
 import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -76,18 +81,66 @@
     }
 
     TopGroupsResultTransformer serializer = new TopGroupsResultTransformer(rb);
+
+    NamedList<Object> shardInfo = null;
+    if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+      shardInfo = new SimpleOrderedMap<Object>();
+      rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
+    }
+
     for (ShardResponse srsp : shardRequest.responses) {
+      SimpleOrderedMap<Object> individualShardInfo = null;
+      if (shardInfo != null) {
+        individualShardInfo = new SimpleOrderedMap<Object>();
+
+        if (srsp.getException() != null) {
+          Throwable t = srsp.getException();
+          if (t instanceof SolrServerException) {
+            t = ((SolrServerException) t).getCause();
+          }
+          individualShardInfo.add("error", t.toString());
+          StringWriter trace = new StringWriter();
+          t.printStackTrace(new PrintWriter(trace));
+          individualShardInfo.add("trace", trace.toString());
+        } else {
+          // summary for successful shard response is added down below
+        }
+        if (srsp.getSolrResponse() != null) {
+          individualShardInfo.add("time", srsp.getSolrResponse().getElapsedTime());
+        }
+
+        shardInfo.add(srsp.getShard(), individualShardInfo);
+      }
+      if (rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false) && srsp.getException() != null) {
+        continue; // skip this shard's response: it errored and we're tolerant
+      }
       NamedList<NamedList> secondPhaseResult = (NamedList<NamedList>) srsp.getSolrResponse().getResponse().get("secondPhase");
       Map<String, ?> result = serializer.transformToNative(secondPhaseResult, groupSort, sortWithinGroup, srsp.getShard());
+      int numFound = 0;
+      float maxScore = Float.NaN;
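+      // NaN is the sentinel here: maxScore only becomes a real score once some
+      // group or query result reports one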
       for (String field : commandTopGroups.keySet()) {
         TopGroups<BytesRef> topGroups = (TopGroups<BytesRef>) result.get(field);
         if (topGroups == null) {
           continue;
         }
+        if (individualShardInfo != null) { // keep track of this when shards.info=true
+          numFound += topGroups.totalHitCount;
+          if (Float.isNaN(maxScore) || topGroups.maxScore > maxScore) maxScore = topGroups.maxScore;
+        }
         commandTopGroups.get(field).add(topGroups);
       }
       for (String query : queries) {
-        commandTopDocs.get(query).add((QueryCommandResult) result.get(query));
+        QueryCommandResult queryCommandResult = (QueryCommandResult) result.get(query);
+        if (individualShardInfo != null) { // keep track of this when shards.info=true
+          numFound += queryCommandResult.getMatches();
+          float thisMax = queryCommandResult.getTopDocs().getMaxScore();
+          if (Float.isNaN(maxScore) || thisMax > maxScore) maxScore = thisMax;
+        }
+        commandTopDocs.get(query).add(queryCommandResult);
+      }
+      if (individualShardInfo != null) { // when shards.info=true
+        individualShardInfo.add("numFound", numFound);
+        individualShardInfo.add("maxScore", maxScore);
       }
     }
     try {
diff --git a/solr/core/src/java/org/apache/solr/search/similarities/SweetSpotSimilarityFactory.java b/solr/core/src/java/org/apache/solr/search/similarities/SweetSpotSimilarityFactory.java
index 6a6c582..7094e6a 100644
--- a/solr/core/src/java/org/apache/solr/search/similarities/SweetSpotSimilarityFactory.java
+++ b/solr/core/src/java/org/apache/solr/search/similarities/SweetSpotSimilarityFactory.java
@@ -180,7 +180,7 @@
   private static final class HyperbolicSweetSpotSimilarity 
     extends SweetSpotSimilarity {
     @Override
-    public float tf(int freq) {
+    public float tf(float freq) {
       return hyperbolicTf(freq);
     }
   };
diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
index 2be1fe2..72067fd 100644
--- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -17,35 +17,6 @@
 
 package org.apache.solr.servlet;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.WeakHashMap;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
 import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -81,6 +52,34 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.WeakHashMap;
+
 /**
 * This filter looks at the incoming URL and maps it to handlers defined in solrconfig.xml
  *
@@ -116,12 +115,11 @@
   {
     log.info("SolrDispatchFilter.init()");
 
-    CoreContainer.Initializer init = createInitializer();
     try {
       // web.xml configuration
       this.pathPrefix = config.getInitParameter( "path-prefix" );
 
-      this.cores = init.initialize();
+      this.cores = createCoreContainer();
       log.info("user.dir=" + System.getProperty("user.dir"));
     }
     catch( Throwable t ) {
@@ -132,15 +130,20 @@
 
     log.info("SolrDispatchFilter.init() done");
   }
+
+  /**
+   * Override this to change CoreContainer initialization
+   * @return a CoreContainer to hold this server's cores
+   */
+  protected CoreContainer createCoreContainer() {
+    CoreContainer cores = new CoreContainer();
+    cores.load();
+    return cores;
+  }
   
   public CoreContainer getCores() {
     return cores;
   }
-
-  /** Method to override to change how CoreContainer initialization is performed. */
-  protected CoreContainer.Initializer createInitializer() {
-    return new CoreContainer.Initializer();
-  }
   
   @Override
   public void destroy() {
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCache.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCache.java
new file mode 100644
index 0000000..a6cdf64
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCache.java
@@ -0,0 +1,199 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import com.googlecode.concurrentlinkedhashmap.EvictionListener;
+
+public class BlockCache {
+  
+  public static final int _128M = 134217728;
+  public static final int _32K = 32768;
+  private final ConcurrentMap<BlockCacheKey,BlockCacheLocation> cache;
+  private final ByteBuffer[] banks;
+  private final BlockLocks[] locks;
+  private final AtomicInteger[] lockCounters;
+  private final int blockSize;
+  private final int numberOfBlocksPerBank;
+  private final int maxEntries;
+  private final Metrics metrics;
+  
+  public BlockCache(Metrics metrics, boolean directAllocation, long totalMemory) {
+    this(metrics, directAllocation, totalMemory, _128M);
+  }
+  
+  public BlockCache(Metrics metrics, boolean directAllocation,
+      long totalMemory, int slabSize) {
+    this(metrics, directAllocation, totalMemory, slabSize, _32K);
+  }
+  
+  public BlockCache(Metrics metrics, boolean directAllocation,
+      long totalMemory, int slabSize, int blockSize) {
+    this.metrics = metrics;
+    numberOfBlocksPerBank = slabSize / blockSize;
+    int numberOfBanks = (int) (totalMemory / slabSize);
+    
+    banks = new ByteBuffer[numberOfBanks];
+    locks = new BlockLocks[numberOfBanks];
+    lockCounters = new AtomicInteger[numberOfBanks];
+    maxEntries = (numberOfBlocksPerBank * numberOfBanks) - 1;
+    for (int i = 0; i < numberOfBanks; i++) {
+      if (directAllocation) {
+        banks[i] = ByteBuffer.allocateDirect(numberOfBlocksPerBank * blockSize);
+      } else {
+        banks[i] = ByteBuffer.allocate(numberOfBlocksPerBank * blockSize);
+      }
+      locks[i] = new BlockLocks(numberOfBlocksPerBank);
+      lockCounters[i] = new AtomicInteger();
+    }
+    
+    EvictionListener<BlockCacheKey,BlockCacheLocation> listener = new EvictionListener<BlockCacheKey,BlockCacheLocation>() {
+      @Override
+      public void onEviction(BlockCacheKey key, BlockCacheLocation location) {
+        releaseLocation(location);
+      }
+    };
+    cache = new ConcurrentLinkedHashMap.Builder<BlockCacheKey,BlockCacheLocation>()
+        .maximumWeightedCapacity(maxEntries).listener(listener).build();
+    this.blockSize = blockSize;
+  }
+  
+  private void releaseLocation(BlockCacheLocation location) {
+    if (location == null) {
+      return;
+    }
+    int bankId = location.getBankId();
+    int block = location.getBlock();
+    location.setRemoved(true);
+    locks[bankId].clear(block);
+    lockCounters[bankId].decrementAndGet();
+    metrics.blockCacheEviction.incrementAndGet();
+    metrics.blockCacheSize.decrementAndGet();
+  }
+  
+  public boolean store(BlockCacheKey blockCacheKey, int blockOffset,
+      byte[] data, int offset, int length) {
+    if (length + blockOffset > blockSize) {
+      throw new RuntimeException("Buffer size exceeded, expecting max ["
+          + blockSize + "] got length [" + length + "] with blockOffset ["
+          + blockOffset + "]");
+    }
+    BlockCacheLocation location = cache.get(blockCacheKey);
+    boolean newLocation = false;
+    if (location == null) {
+      newLocation = true;
+      location = new BlockCacheLocation();
+      if (!findEmptyLocation(location)) {
+        return false;
+      }
+    }
+    if (location.isRemoved()) {
+      return false;
+    }
+    int bankId = location.getBankId();
+    int bankOffset = location.getBlock() * blockSize;
+    ByteBuffer bank = getBank(bankId);
+    bank.position(bankOffset + blockOffset);
+    bank.put(data, offset, length);
+    if (newLocation) {
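+      // put returns any entry we displaced for this key; release its slot so
+      // the underlying block becomes reusable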
+      releaseLocation(cache.put(blockCacheKey.clone(), location));
+      metrics.blockCacheSize.incrementAndGet();
+    }
+    return true;
+  }
+  
+  public boolean fetch(BlockCacheKey blockCacheKey, byte[] buffer,
+      int blockOffset, int off, int length) {
+    BlockCacheLocation location = cache.get(blockCacheKey);
+    if (location == null) {
+      return false;
+    }
+    if (location.isRemoved()) {
+      return false;
+    }
+    int bankId = location.getBankId();
+    int offset = location.getBlock() * blockSize;
+    location.touch();
+    ByteBuffer bank = getBank(bankId);
+    bank.position(offset + blockOffset);
+    bank.get(buffer, off, length);
+    return true;
+  }
+  
+  public boolean fetch(BlockCacheKey blockCacheKey, byte[] buffer) {
+    checkLength(buffer);
+    return fetch(blockCacheKey, buffer, 0, 0, blockSize);
+  }
+  
+  private boolean findEmptyLocation(BlockCacheLocation location) {
+    // This is a tight loop that will try to find a location to
+    // place the block before giving up
+    for (int j = 0; j < 10; j++) {
+      OUTER: for (int bankId = 0; bankId < banks.length; bankId++) {
+        AtomicInteger bitSetCounter = lockCounters[bankId];
+        BlockLocks bitSet = locks[bankId];
+        if (bitSetCounter.get() == numberOfBlocksPerBank) {
+          // if bitset is full
+          continue OUTER;
+        }
+        // this check needs to spin, if a lock was attempted but not obtained
+        // the rest of the bank should not be skipped
+        int bit = bitSet.nextClearBit(0);
+        INNER: while (bit != -1) {
+          if (bit >= numberOfBlocksPerBank) {
+            // bit set is full
+            continue OUTER;
+          }
+          if (!bitSet.set(bit)) {
+            // lock was not obtained
+            // this restarts at 0 because another block could have been unlocked
+            // while this was executing
+            bit = bitSet.nextClearBit(0);
+            continue INNER;
+          } else {
+            // lock obtained
+            location.setBankId(bankId);
+            location.setBlock(bit);
+            bitSetCounter.incrementAndGet();
+            return true;
+          }
+        }
+      }
+    }
+    return false;
+  }
+  
+  private void checkLength(byte[] buffer) {
+    if (buffer.length != blockSize) {
+      throw new RuntimeException("Buffer wrong size, expecting [" + blockSize
+          + "] got [" + buffer.length + "]");
+    }
+  }
+  
+  private ByteBuffer getBank(int bankId) {
+    return banks[bankId].duplicate();
+  }
+  
+  public int getSize() {
+    return cache.size();
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java
new file mode 100644
index 0000000..d0daefe
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheKey.java
@@ -0,0 +1,69 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BlockCacheKey implements Cloneable {
+  
+  private long block;
+  private int file;
+  
+  public long getBlock() {
+    return block;
+  }
+  
+  public int getFile() {
+    return file;
+  }
+  
+  public void setBlock(long block) {
+    this.block = block;
+  }
+  
+  public void setFile(int file) {
+    this.file = file;
+  }
+  
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + (int) (block ^ (block >>> 32));
+    result = prime * result + file;
+    return result;
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj == null) return false;
+    if (getClass() != obj.getClass()) return false;
+    BlockCacheKey other = (BlockCacheKey) obj;
+    if (block != other.block) return false;
+    if (file != other.file) return false;
+    return true;
+  }
+  
+  @Override
+  public BlockCacheKey clone() {
+    try {
+      return (BlockCacheKey) super.clone();
+    } catch (CloneNotSupportedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java
new file mode 100644
index 0000000..968628f
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockCacheLocation.java
@@ -0,0 +1,67 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class BlockCacheLocation {
+  
+  private int block;
+  private int bankId;
+  private long lastAccess = System.currentTimeMillis();
+  private long accesses;
+  private AtomicBoolean removed = new AtomicBoolean(false);
+  
+  public void setBlock(int block) {
+    this.block = block;
+  }
+  
+  public void setBankId(int bankId) {
+    this.bankId = bankId;
+  }
+  
+  public int getBlock() {
+    return block;
+  }
+  
+  public int getBankId() {
+    return bankId;
+  }
+  
+  public void touch() {
+    lastAccess = System.currentTimeMillis();
+    accesses++;
+  }
+  
+  public long getLastAccess() {
+    return lastAccess;
+  }
+  
+  public long getNumberOfAccesses() {
+    return accesses;
+  }
+  
+  public boolean isRemoved() {
+    return removed.get();
+  }
+  
+  public void setRemoved(boolean removed) {
+    this.removed.set(removed);
+  }
+  
+}
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
new file mode 100644
index 0000000..d199002
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectory.java
@@ -0,0 +1,385 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Set;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
+import org.apache.solr.store.hdfs.HdfsDirectory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class BlockDirectory extends Directory {
+  public static Logger LOG = LoggerFactory.getLogger(BlockDirectory.class);
+  
+  public static final long BLOCK_SHIFT = 13; // 2^13 = 8,192 bytes per block
+  public static final long BLOCK_MOD = 0x1FFF;
+  public static final int BLOCK_SIZE = 1 << BLOCK_SHIFT;
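+  // e.g. for pos = 10000: getBlock(pos) = 1, getPosition(pos) = 1808,
+  // and getRealPosition(1, 1808) = 10000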
+  
+  public static long getBlock(long pos) {
+    return pos >>> BLOCK_SHIFT;
+  }
+  
+  public static long getPosition(long pos) {
+    return pos & BLOCK_MOD;
+  }
+  
+  public static long getRealPosition(long block, long positionInBlock) {
+    return (block << BLOCK_SHIFT) + positionInBlock;
+  }
+  
+  public static Cache NO_CACHE = new Cache() {
+    
+    @Override
+    public void update(String name, long blockId, int blockOffset,
+        byte[] buffer, int offset, int length) {}
+    
+    @Override
+    public boolean fetch(String name, long blockId, int blockOffset, byte[] b,
+        int off, int lengthToReadInBlock) {
+      return false;
+    }
+    
+    @Override
+    public void delete(String name) {
+      
+    }
+    
+    @Override
+    public long size() {
+      return 0;
+    }
+    
+    @Override
+    public void renameCacheFile(String source, String dest) {}
+  };
+  
+  private Directory directory;
+  private int blockSize;
+  private String dirName;
+  private Cache cache;
+  private Set<String> blockCacheFileTypes;
+  private final boolean blockCacheReadEnabled;
+  private final boolean blockCacheWriteEnabled;
+  
+  public BlockDirectory(String dirName, Directory directory, Cache cache,
+      Set<String> blockCacheFileTypes, boolean blockCacheReadEnabled,
+      boolean blockCacheWriteEnabled) throws IOException {
+    this.dirName = dirName;
+    this.directory = directory;
+    blockSize = BLOCK_SIZE;
+    this.cache = cache;
+    if (blockCacheFileTypes == null || blockCacheFileTypes.isEmpty()) {
+      this.blockCacheFileTypes = null;
+    } else {
+      this.blockCacheFileTypes = blockCacheFileTypes;
+    }
+    this.blockCacheReadEnabled = blockCacheReadEnabled;
+    if (!blockCacheReadEnabled) {
+      LOG.info("Block cache on read is disabled");
+    }
+    this.blockCacheWriteEnabled = blockCacheWriteEnabled;
+    if (!blockCacheWriteEnabled) {
+      LOG.info("Block cache on write is disabled");
+    }
+    if (directory.getLockFactory() != null) {
+      setLockFactory(directory.getLockFactory());
+    }
+  }
+  
+  private IndexInput openInput(String name, int bufferSize, IOContext context)
+      throws IOException {
+    final IndexInput source = directory.openInput(name, context);
+    if (useReadCache(name, context)) {
+      return new CachedIndexInput(source, blockSize, name,
+          getFileCacheName(name), cache, bufferSize);
+    }
+    return source;
+  }
+  
+  private boolean isCachableFile(String name) {
+    for (String ext : blockCacheFileTypes) {
+      if (name.endsWith(ext)) {
+        return true;
+      }
+    }
+    return false;
+  }
+  
+  @Override
+  public IndexInput openInput(final String name, IOContext context)
+      throws IOException {
+    return openInput(name, blockSize, context);
+  }
+  
+  static class CachedIndexInput extends CustomBufferedIndexInput {
+    
+    private IndexInput _source;
+    private int _blockSize;
+    private long _fileLength;
+    private String _cacheName;
+    private Cache _cache;
+    
+    public CachedIndexInput(IndexInput source, int blockSize, String name,
+        String cacheName, Cache cache, int bufferSize) {
+      super(name, bufferSize);
+      _source = source;
+      _blockSize = blockSize;
+      _fileLength = source.length();
+      _cacheName = cacheName;
+      _cache = cache;
+    }
+    
+    @Override
+    public IndexInput clone() {
+      CachedIndexInput clone = (CachedIndexInput) super.clone();
+      clone._source = (IndexInput) _source.clone();
+      return clone;
+    }
+    
+    @Override
+    public long length() {
+      return _source.length();
+    }
+    
+    @Override
+    protected void seekInternal(long pos) throws IOException {}
+    
+    @Override
+    protected void readInternal(byte[] b, int off, int len) throws IOException {
+      long position = getFilePointer();
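+      // serve the read block by block; each fetchBlock call copies at most
+      // one cache block's worth of bytes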
+      while (len > 0) {
+        int length = fetchBlock(position, b, off, len);
+        position += length;
+        len -= length;
+        off += length;
+      }
+    }
+    
+    private int fetchBlock(long position, byte[] b, int off, int len)
+        throws IOException {
+      // read whole block into cache and then provide needed data
+      long blockId = getBlock(position);
+      int blockOffset = (int) getPosition(position);
+      int lengthToReadInBlock = Math.min(len, _blockSize - blockOffset);
+      if (checkCache(blockId, blockOffset, b, off, lengthToReadInBlock)) {
+        return lengthToReadInBlock;
+      } else {
+        readIntoCacheAndResult(blockId, blockOffset, b, off,
+            lengthToReadInBlock);
+      }
+      return lengthToReadInBlock;
+    }
+    
+    private void readIntoCacheAndResult(long blockId, int blockOffset,
+        byte[] b, int off, int lengthToReadInBlock) throws IOException {
+      long position = getRealPosition(blockId, 0);
+      int length = (int) Math.min(_blockSize, _fileLength - position);
+      _source.seek(position);
+      
+      byte[] buf = BufferStore.takeBuffer(_blockSize);
+      _source.readBytes(buf, 0, length);
+      System.arraycopy(buf, blockOffset, b, off, lengthToReadInBlock);
+      _cache.update(_cacheName, blockId, 0, buf, 0, _blockSize);
+      BufferStore.putBuffer(buf);
+    }
+    
+    private boolean checkCache(long blockId, int blockOffset, byte[] b,
+        int off, int lengthToReadInBlock) {
+      return _cache.fetch(_cacheName, blockId, blockOffset, b, off,
+          lengthToReadInBlock);
+    }
+    
+    @Override
+    protected void closeInternal() throws IOException {
+      _source.close();
+    }
+  }
+  
+  @Override
+  public void close() throws IOException {
+    try {
+      String[] files = listAll();
+      
+      for (String file : files) {
+        cache.delete(getFileCacheName(file));
+      }
+      
+    } catch (FileNotFoundException e) {
+      // the local file system folder may be gone
+    } finally {
+      directory.close();
+    }
+  }
+  
+  String getFileCacheName(String name) throws IOException {
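+    // appending the modification time means a rewritten file gets a fresh cache
+    // name, so stale blocks are not served for it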
+    return getFileCacheLocation(name) + ":" + getFileModified(name);
+  }
+  
+  private long getFileModified(String name) throws IOException {
+    if (directory instanceof FSDirectory) {
+      File directory = ((FSDirectory) this.directory).getDirectory();
+      File file = new File(directory, name);
+      if (!file.exists()) {
+        throw new FileNotFoundException("File [" + name + "] not found");
+      }
+      return file.lastModified();
+    } else if (directory instanceof HdfsDirectory) {
+      return ((HdfsDirectory) directory).fileModified(name);
+    } else {
+      throw new RuntimeException("Not supported");
+    }
+  }
+  
+  public void clearLock(String name) throws IOException {
+    directory.clearLock(name);
+  }
+  
+  String getFileCacheLocation(String name) {
+    return dirName + "/" + name;
+  }
+  
+  @Override
+  public void copy(Directory to, String src, String dest, IOContext context)
+      throws IOException {
+    directory.copy(to, src, dest, context);
+  }
+  
+  public LockFactory getLockFactory() {
+    return directory.getLockFactory();
+  }
+  
+  public String getLockID() {
+    return directory.getLockID();
+  }
+  
+  public Lock makeLock(String name) {
+    return directory.makeLock(name);
+  }
+  
+  public void setLockFactory(LockFactory lockFactory) throws IOException {
+    directory.setLockFactory(lockFactory);
+  }
+  
+  @Override
+  public void sync(Collection<String> names) throws IOException {
+    directory.sync(names);
+  }
+  
+  // @SuppressWarnings("deprecation")
+  // public void sync(String name) throws IOException {
+  // _directory.sync(name);
+  // }
+  
+  public String toString() {
+    return directory.toString();
+  }
+  
+  /**
+   * Determine whether read caching should be used for a particular
+   * file/context.
+   */
+  boolean useReadCache(String name, IOContext context) {
+    if (!blockCacheReadEnabled) {
+      return false;
+    }
+    if (blockCacheFileTypes != null && !isCachableFile(name)) {
+      return false;
+    }
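+    // every IOContext currently allows read caching; the switch is presumably
+    // a hook for future exclusions (compare useWriteCache below)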
+    switch (context.context) {
+      default: {
+        return true;
+      }
+    }
+  }
+  
+  /**
+   * Determine whether write caching should be used for a particular
+   * file/context.
+   */
+  boolean useWriteCache(String name, IOContext context) {
+    if (!blockCacheWriteEnabled) {
+      return false;
+    }
+    if (blockCacheFileTypes != null && !isCachableFile(name)) {
+      return false;
+    }
+    switch (context.context) {
+      case MERGE: {
+        // we currently don't cache any merge context writes
+        return false;
+      }
+      default: {
+        return true;
+      }
+    }
+  }
+  
+  @Override
+  public IndexOutput createOutput(String name, IOContext context)
+      throws IOException {
+    IndexOutput dest = directory.createOutput(name, context);
+    if (useWriteCache(name, context)) {
+      return new CachedIndexOutput(this, dest, blockSize, name, cache,
+          blockSize);
+    }
+    return dest;
+  }
+  
+  public void deleteFile(String name) throws IOException {
+    cache.delete(getFileCacheName(name));
+    directory.deleteFile(name);
+  }
+  
+  public boolean fileExists(String name) throws IOException {
+    return directory.fileExists(name);
+  }
+  
+  public long fileLength(String name) throws IOException {
+    return directory.fileLength(name);
+  }
+  
+  // @SuppressWarnings("deprecation")
+  // public long fileModified(String name) throws IOException {
+  // return _directory.fileModified(name);
+  // }
+  
+  public String[] listAll() throws IOException {
+    return directory.listAll();
+  }
+  
+  // @SuppressWarnings("deprecation")
+  // public void touchFile(String name) throws IOException {
+  // _directory.touchFile(name);
+  // }
+  
+  public Directory getDirectory() {
+    return directory;
+  }
+  
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java
new file mode 100644
index 0000000..41ca9bb
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java
@@ -0,0 +1,87 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class BlockDirectoryCache implements Cache {
+  private BlockCache blockCache;
+  private AtomicInteger counter = new AtomicInteger();
+  private Map<String,Integer> names = new ConcurrentHashMap<String,Integer>();
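+  // maps cached file names to compact int ids; BlockCacheKey stores the id
+  // rather than the name itself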
+  private Metrics metrics;
+  
+  public BlockDirectoryCache(BlockCache blockCache, Metrics metrics) {
+    this.blockCache = blockCache;
+    this.metrics = metrics;
+  }
+  
+  @Override
+  public void delete(String name) {
+    names.remove(name);
+  }
+  
+  @Override
+  public void update(String name, long blockId, int blockOffset, byte[] buffer,
+      int offset, int length) {
+    Integer file = names.get(name);
+    if (file == null) {
+      file = counter.incrementAndGet();
+      names.put(name, file);
+    }
+    BlockCacheKey blockCacheKey = new BlockCacheKey();
+    blockCacheKey.setBlock(blockId);
+    blockCacheKey.setFile(file);
+    blockCache.store(blockCacheKey, blockOffset, buffer, offset, length);
+  }
+  
+  @Override
+  public boolean fetch(String name, long blockId, int blockOffset, byte[] b,
+      int off, int lengthToReadInBlock) {
+    Integer file = names.get(name);
+    if (file == null) {
+      return false;
+    }
+    BlockCacheKey blockCacheKey = new BlockCacheKey();
+    blockCacheKey.setBlock(blockId);
+    blockCacheKey.setFile(file);
+    boolean fetch = blockCache.fetch(blockCacheKey, b, blockOffset, off,
+        lengthToReadInBlock);
+    if (fetch) {
+      metrics.blockCacheHit.incrementAndGet();
+    } else {
+      metrics.blockCacheMiss.incrementAndGet();
+    }
+    return fetch;
+  }
+  
+  @Override
+  public long size() {
+    return blockCache.getSize();
+  }
+  
+  @Override
+  public void renameCacheFile(String source, String dest) {
+    Integer file = names.remove(source);
+    // file may be null if the file was empty and never made it into the cache
+    if (file != null) {
+      names.put(dest, file);
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockLocks.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockLocks.java
new file mode 100644
index 0000000..f29ac61
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockLocks.java
@@ -0,0 +1,96 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.atomic.AtomicLongArray;
+
+import org.apache.lucene.util.OpenBitSet;
+
+public class BlockLocks {
+  
+  private AtomicLongArray bits;
+  private int wlen;
+  
+  public BlockLocks(long numBits) {
+    int length = OpenBitSet.bits2words(numBits);
+    bits = new AtomicLongArray(length);
+    wlen = length;
+  }
+  
+  /**
+   * Find the next clear bit in the bit set.
+   * 
+   * @param index
+   *          index to start the search from (inclusive)
+   * @return the index of the next clear bit, or -1 if there is none
+   */
+  public int nextClearBit(int index) {
+    int i = index >> 6;
+    if (i >= wlen) return -1;
+    int subIndex = index & 0x3f; // index within the word
+    long word = ~bits.get(i) >> subIndex; // skip all the bits to the right of index
+    if (word != 0) {
+      return (i << 6) + subIndex + Long.numberOfTrailingZeros(word);
+    }
+    while (++i < wlen) {
+      word = ~bits.get(i);
+      if (word != 0) {
+        return (i << 6) + Long.numberOfTrailingZeros(word);
+      }
+    }
+    return -1;
+  }
+  
+  /**
+   * Thread safe set operation that will set the bit if and only if the bit was
+   * not previously set.
+   * 
+   * @param index
+   *          the index position to set.
+   * @return true if the bit was set by this call, false if it was already set.
+   */
+  public boolean set(int index) {
+    int wordNum = index >> 6; // div 64
+    int bit = index & 0x3f; // mod 64
+    long bitmask = 1L << bit;
+    long word, oword;
+    do {
+      word = bits.get(wordNum);
+      // if set another thread stole the lock
+      if ((word & bitmask) != 0) {
+        return false;
+      }
+      oword = word;
+      word |= bitmask;
+    } while (!bits.compareAndSet(wordNum, oword, word));
+    return true;
+  }
+  
+  public void clear(int index) {
+    int wordNum = index >> 6; // div 64
+    int bit = index & 0x3f; // mod 64
+    long bitmask = 1L << bit;
+    long word, oword;
+    do {
+      word = bits.get(wordNum);
+      oword = word;
+      word &= ~bitmask;
+    } while (!bits.compareAndSet(wordNum, oword, word));
+  }
+}
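+
+// A usage sketch of the intended allocation pattern: scan for a free slot
+// with nextClearBit, claim it with the CAS-based set, and clear it when
+// done. Names below are hypothetical.
+//
+//   BlockLocks locks = new BlockLocks(8192);
+//   int slot = locks.nextClearBit(0);
+//   while (slot != -1 && !locks.set(slot)) {
+//     slot = locks.nextClearBit(slot + 1); // lost the race, try the next slot
+//   }
+//   if (slot != -1) {
+//     try {
+//       // ... exclusive use of the slot ...
+//     } finally {
+//       locks.clear(slot);
+//     }
+//   }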
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java b/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java
new file mode 100644
index 0000000..70ced2e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java
@@ -0,0 +1,110 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class BufferStore {
+  
+  public static Logger LOG = LoggerFactory.getLogger(BufferStore.class);
+  
+  private static BlockingQueue<byte[]> _1024 = setupBuffers(1024, 1);
+  private static BlockingQueue<byte[]> _8192 = setupBuffers(8192, 1);
+  public static AtomicLong shardBuffercacheLost = new AtomicLong();
+  public static AtomicLong shardBuffercacheAllocate1024 = new AtomicLong();
+  public static AtomicLong shardBuffercacheAllocate8192 = new AtomicLong();
+  public static AtomicLong shardBuffercacheAllocateOther = new AtomicLong();
+  
+  public static void init(int _1024Size, int _8192Size, Metrics metrics) {
+
+    LOG.info("Initializing the 1024 buffers with [{}] buffers.", _1024Size);
+    _1024 = setupBuffers(1024, _1024Size);
+    LOG.info("Initializing the 8192 buffers with [{}] buffers.", _8192Size);
+    _8192 = setupBuffers(8192, _8192Size);
+    shardBuffercacheLost = metrics.shardBuffercacheLost;
+    shardBuffercacheAllocate1024 = metrics.shardBuffercacheAllocate1024;
+    shardBuffercacheAllocate8192 = metrics.shardBuffercacheAllocate8192;
+    shardBuffercacheAllocateOther = metrics.shardBuffercacheAllocateOther;
+  }
+  
+  private static BlockingQueue<byte[]> setupBuffers(int bufferSize, int count) {
+    BlockingQueue<byte[]> queue = new ArrayBlockingQueue<byte[]>(count);
+    for (int i = 0; i < count; i++) {
+      queue.add(new byte[bufferSize]);
+    }
+    return queue;
+  }
+  
+  public static byte[] takeBuffer(int bufferSize) {
+    switch (bufferSize) {
+      case 1024:
+        return newBuffer1024(_1024.poll());
+      case 8192:
+        return newBuffer8192(_8192.poll());
+      default:
+        return newBuffer(bufferSize);
+    }
+  }
+  
+  public static void putBuffer(byte[] buffer) {
+    if (buffer == null) {
+      return;
+    }
+    int bufferSize = buffer.length;
+    switch (bufferSize) {
+      case 1024:
+        checkReturn(_1024.offer(buffer));
+        return;
+      case 8192:
+        checkReturn(_8192.offer(buffer));
+        return;
+      default:
+        // buffers of other sizes are not pooled; leave them for the GC
+        return;
+    }
+  }
+  
+  private static void checkReturn(boolean offer) {
+    if (!offer) {
+      shardBuffercacheLost.incrementAndGet();
+    }
+  }
+  
+  private static byte[] newBuffer1024(byte[] buf) {
+    if (buf != null) {
+      return buf;
+    }
+    shardBuffercacheAllocate1024.incrementAndGet();
+    return new byte[1024];
+  }
+  
+  private static byte[] newBuffer8192(byte[] buf) {
+    if (buf != null) {
+      return buf;
+    }
+    shardBuffercacheAllocate8192.incrementAndGet();
+    return new byte[8192];
+  }
+  
+  private static byte[] newBuffer(int size) {
+    shardBuffercacheAllocateOther.incrementAndGet();
+    return new byte[size];
+  }
+}
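+
+// A minimal sketch of the recycling contract: take a buffer, use it, and
+// return it so the pool can serve the next caller instead of allocating.
+// Note that pooled buffers are handed back as-is, not zeroed.
+//
+//   byte[] buf = BufferStore.takeBuffer(1024);
+//   try {
+//     // ... fill and consume buf ...
+//   } finally {
+//     BufferStore.putBuffer(buf); // back to the 1024-byte pool
+//   }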
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Cache.java b/solr/core/src/java/org/apache/solr/store/blockcache/Cache.java
new file mode 100644
index 0000000..7e70ad0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/Cache.java
@@ -0,0 +1,62 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Cache {
+  
+  /**
+   * Remove a file from the cache.
+   * 
+   * @param name
+   *          cache file name
+   */
+  void delete(String name);
+  
+  /**
+   * Update the content of the specified cache file. Creates the cache entry
+   * if necessary.
+   */
+  void update(String name, long blockId, int blockOffset, byte[] buffer,
+      int offset, int length);
+  
+  /**
+   * Fetch the specified cache file content.
+   * 
+   * @return true if the cached content was found, otherwise false
+   */
+  boolean fetch(String name, long blockId, int blockOffset, byte[] b, int off,
+      int lengthToReadInBlock);
+  
+  /**
+   * Number of entries in the cache.
+   */
+  long size();
+  
+  /**
+   * Expert: Rename the specified file in the cache. Allows a file to be moved
+   * without invalidating the cache.
+   * 
+   * @param source
+   *          original name
+   * @param dest
+   *          final name
+   */
+  void renameCacheFile(String source, String dest);
+  
+}
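+
+// How callers derive the (blockId, blockOffset) pair: the authoritative
+// mapping is BlockDirectory.getBlock/getPosition (added elsewhere in this
+// patch); the arithmetic below is an assumption for illustration, for some
+// fixed block size.
+//
+//   long blockId     = position / blockSize;          // which block
+//   int  blockOffset = (int) (position % blockSize);  // offset inside it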
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java b/solr/core/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java
new file mode 100644
index 0000000..6e3c92e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/CachedIndexOutput.java
@@ -0,0 +1,91 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.IndexOutput;
+
+/*
+ * Cache the blocks as they are written. The cache file name is the name of
+ * the file until the file is closed, at which point the cache is updated
+ * to include the last modified date (which is unknown until that point).
+ */
+public class CachedIndexOutput extends ReusedBufferedIndexOutput {
+  private final BlockDirectory directory;
+  private final IndexOutput dest;
+  private final int blockSize;
+  private final String name;
+  private final String location;
+  private final Cache cache;
+  
+  public CachedIndexOutput(BlockDirectory directory, IndexOutput dest,
+      int blockSize, String name, Cache cache, int bufferSize) {
+    super(bufferSize);
+    this.directory = directory;
+    this.dest = dest;
+    this.blockSize = blockSize;
+    this.name = name;
+    this.location = directory.getFileCacheLocation(name);
+    this.cache = cache;
+  }
+  
+  @Override
+  public void flushInternal() throws IOException {
+    dest.flush();
+  }
+  
+  @Override
+  public void closeInternal() throws IOException {
+    dest.close();
+    cache.renameCacheFile(location, directory.getFileCacheName(name));
+  }
+  
+  @Override
+  public void seekInternal(long pos) throws IOException {
+    throw new IOException("Seek not supported");
+  }
+  
+  private int writeBlock(long position, byte[] b, int offset, int length)
+      throws IOException {
+    // read whole block into cache and then provide needed data
+    long blockId = BlockDirectory.getBlock(position);
+    int blockOffset = (int) BlockDirectory.getPosition(position);
+    int lengthToWriteInBlock = Math.min(length, blockSize - blockOffset);
+    
+    // write the file and copy into the cache
+    dest.writeBytes(b, offset, lengthToWriteInBlock);
+    cache.update(location, blockId, blockOffset, b, offset,
+        lengthToWriteInBlock);
+    
+    return lengthToWriteInBlock;
+  }
+  
+  @Override
+  public void writeInternal(byte[] b, int offset, int length)
+      throws IOException {
+    long position = getBufferStart();
+    while (length > 0) {
+      int len = writeBlock(position, b, offset, length);
+      position += len;
+      length -= len;
+      offset += len;
+    }
+  }
+  
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java b/solr/core/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java
new file mode 100644
index 0000000..d8ce739
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/CustomBufferedIndexInput.java
@@ -0,0 +1,272 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.EOFException;
+import java.io.IOException;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+
+public abstract class CustomBufferedIndexInput extends IndexInput {
+  
+  public static final int BUFFER_SIZE = 1024;
+  
+  private int bufferSize = BUFFER_SIZE;
+  
+  protected byte[] buffer;
+  
+  private long bufferStart = 0; // position in file of buffer
+  private int bufferLength = 0; // end of valid bytes
+  private int bufferPosition = 0; // next byte to read
+  
+  @Override
+  public byte readByte() throws IOException {
+    if (bufferPosition >= bufferLength) refill();
+    return buffer[bufferPosition++];
+  }
+  
+  public CustomBufferedIndexInput(String resourceDesc) {
+    this(resourceDesc, BUFFER_SIZE);
+  }
+  
+  public CustomBufferedIndexInput(String resourceDesc, int bufferSize) {
+    super(resourceDesc);
+    checkBufferSize(bufferSize);
+    this.bufferSize = bufferSize;
+  }
+  
+  private void checkBufferSize(int bufferSize) {
+    if (bufferSize <= 0) throw new IllegalArgumentException(
+        "bufferSize must be greater than 0 (got " + bufferSize + ")");
+  }
+  
+  @Override
+  public void readBytes(byte[] b, int offset, int len) throws IOException {
+    readBytes(b, offset, len, true);
+  }
+  
+  @Override
+  public void readBytes(byte[] b, int offset, int len, boolean useBuffer)
+      throws IOException {
+    
+    if (len <= (bufferLength - bufferPosition)) {
+      // the buffer contains enough data to satisfy this request
+      if (len > 0) { // to allow b to be null if len is 0...
+        System.arraycopy(buffer, bufferPosition, b, offset, len);
+      }
+    } else {
+      // the buffer does not have enough data. First serve all we've got.
+      int available = bufferLength - bufferPosition;
+      if (available > 0) {
+        System.arraycopy(buffer, bufferPosition, b, offset, available);
+        offset += available;
+        len -= available;
+        bufferPosition += available;
+      }
+      // and now, read the remaining 'len' bytes:
+      if (useBuffer && len < bufferSize) {
+        // If the amount left to read is small enough, and
+        // we are allowed to use our buffer, do it in the usual
+        // buffered way: fill the buffer and copy from it:
+        refill();
+        if (bufferLength < len) {
+          // Throw an exception when refill() could not read len bytes:
+          System.arraycopy(buffer, 0, b, offset, bufferLength);
+          throw new IOException("read past EOF");
+        } else {
+          System.arraycopy(buffer, 0, b, offset, len);
+          bufferPosition = len;
+        }
+      } else {
+        // The amount left to read is larger than the buffer
+        // or we've been asked to not use our buffer -
+        // there's no performance reason not to read it all
+        // at once. Note that unlike the previous code of
+        // this function, there is no need to do a seek
+        // here, because there's no need to reread what we
+        // had in the buffer.
+        long after = bufferStart + bufferPosition + len;
+        if (after > length()) throw new IOException("read past EOF");
+        readInternal(b, offset, len);
+        bufferStart = after;
+        bufferPosition = 0;
+        bufferLength = 0; // trigger refill() on read
+      }
+    }
+  }
+  
+  @Override
+  public int readInt() throws IOException {
+    if (4 <= (bufferLength - bufferPosition)) {
+      return ((buffer[bufferPosition++] & 0xFF) << 24)
+          | ((buffer[bufferPosition++] & 0xFF) << 16)
+          | ((buffer[bufferPosition++] & 0xFF) << 8)
+          | (buffer[bufferPosition++] & 0xFF);
+    } else {
+      return super.readInt();
+    }
+  }
+  
+  @Override
+  public long readLong() throws IOException {
+    if (8 <= (bufferLength - bufferPosition)) {
+      final int i1 = ((buffer[bufferPosition++] & 0xff) << 24)
+          | ((buffer[bufferPosition++] & 0xff) << 16)
+          | ((buffer[bufferPosition++] & 0xff) << 8)
+          | (buffer[bufferPosition++] & 0xff);
+      final int i2 = ((buffer[bufferPosition++] & 0xff) << 24)
+          | ((buffer[bufferPosition++] & 0xff) << 16)
+          | ((buffer[bufferPosition++] & 0xff) << 8)
+          | (buffer[bufferPosition++] & 0xff);
+      return (((long) i1) << 32) | (i2 & 0xFFFFFFFFL);
+    } else {
+      return super.readLong();
+    }
+  }
+  
+  @Override
+  public int readVInt() throws IOException {
+    if (5 <= (bufferLength - bufferPosition)) {
+      byte b = buffer[bufferPosition++];
+      int i = b & 0x7F;
+      for (int shift = 7; (b & 0x80) != 0; shift += 7) {
+        b = buffer[bufferPosition++];
+        i |= (b & 0x7F) << shift;
+      }
+      return i;
+    } else {
+      return super.readVInt();
+    }
+  }
+  
+  @Override
+  public long readVLong() throws IOException {
+    if (9 <= bufferLength - bufferPosition) {
+      byte b = buffer[bufferPosition++];
+      long i = b & 0x7F;
+      for (int shift = 7; (b & 0x80) != 0; shift += 7) {
+        b = buffer[bufferPosition++];
+        i |= (b & 0x7FL) << shift;
+      }
+      return i;
+    } else {
+      return super.readVLong();
+    }
+  }
+  
+  private void refill() throws IOException {
+    long start = bufferStart + bufferPosition;
+    long end = start + bufferSize;
+    if (end > length()) { // don't read past EOF
+      end = length();
+    }
+    int newLength = (int) (end - start);
+    if (newLength <= 0) throw new EOFException("read past EOF");
+    
+    if (buffer == null) {
+      buffer = BufferStore.takeBuffer(bufferSize);
+      seekInternal(bufferStart);
+    }
+    readInternal(buffer, 0, newLength);
+    bufferLength = newLength;
+    bufferStart = start;
+    bufferPosition = 0;
+  }
+  
+  @Override
+  public final void close() throws IOException {
+    closeInternal();
+    BufferStore.putBuffer(buffer);
+    buffer = null;
+  }
+  
+  protected abstract void closeInternal() throws IOException;
+  
+  /**
+   * Expert: implements buffer refill. Reads bytes from the current position in
+   * the input.
+   * 
+   * @param b
+   *          the array to read bytes into
+   * @param offset
+   *          the offset in the array to start storing bytes
+   * @param length
+   *          the number of bytes to read
+   */
+  protected abstract void readInternal(byte[] b, int offset, int length)
+      throws IOException;
+  
+  @Override
+  public long getFilePointer() {
+    return bufferStart + bufferPosition;
+  }
+  
+  @Override
+  public void seek(long pos) throws IOException {
+    if (pos >= bufferStart && pos < (bufferStart + bufferLength)) {
+      bufferPosition = (int) (pos - bufferStart); // seek within buffer
+    } else {
+      bufferStart = pos;
+      bufferPosition = 0;
+      bufferLength = 0; // trigger refill() on read()
+      seekInternal(pos);
+    }
+  }
+  
+  /**
+   * Expert: implements seek. Sets current position in this file, where the next
+   * {@link #readInternal(byte[],int,int)} will occur.
+   * 
+   * @see #readInternal(byte[],int,int)
+   */
+  protected abstract void seekInternal(long pos) throws IOException;
+  
+  @Override
+  public IndexInput clone() {
+    CustomBufferedIndexInput clone = (CustomBufferedIndexInput) super.clone();
+    
+    clone.buffer = null;
+    clone.bufferLength = 0;
+    clone.bufferPosition = 0;
+    clone.bufferStart = getFilePointer();
+    
+    return clone;
+  }
+  
+  /**
+   * Flushes the in-memory buffer to the given output, copying at most
+   * <code>numBytes</code>.
+   * <p>
+   * <b>NOTE:</b> this method does not refill the buffer, however it does
+   * advance the buffer position.
+   * 
+   * @return the number of bytes actually flushed from the in-memory buffer.
+   */
+  protected int flushBuffer(IndexOutput out, long numBytes) throws IOException {
+    int toCopy = bufferLength - bufferPosition;
+    if (toCopy > numBytes) {
+      toCopy = (int) numBytes;
+    }
+    if (toCopy > 0) {
+      out.writeBytes(buffer, bufferPosition, toCopy);
+      bufferPosition += toCopy;
+    }
+    return toCopy;
+  }
+}
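+
+// A sketch of a minimal concrete subclass: the base class supplies all the
+// buffering, so an implementation only provides positional reads. The
+// byte[]-backed class below is hypothetical and exists only to illustrate
+// the contract.
+//
+//   class ByteArrayIndexInput extends CustomBufferedIndexInput {
+//     private final byte[] data;
+//     private long pos;
+//     ByteArrayIndexInput(byte[] data) { super("bytes"); this.data = data; }
+//     @Override protected void readInternal(byte[] b, int off, int len) {
+//       System.arraycopy(data, (int) pos, b, off, len);
+//       pos += len;
+//     }
+//     @Override protected void seekInternal(long p) { pos = p; }
+//     @Override protected void closeInternal() {}
+//     @Override public long length() { return data.length; }
+//   }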
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java
new file mode 100644
index 0000000..fce1b9d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java
@@ -0,0 +1,126 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.metrics.jvm.JvmMetrics;
+
+public class Metrics implements Updater {
+  
+  public static class MethodCall {
+    public AtomicLong invokes = new AtomicLong();
+    public AtomicLong times = new AtomicLong();
+  }
+
+  public AtomicLong blockCacheHit = new AtomicLong(0);
+  public AtomicLong blockCacheMiss = new AtomicLong(0);
+  public AtomicLong blockCacheEviction = new AtomicLong(0);
+  public AtomicLong blockCacheSize = new AtomicLong(0);
+  public AtomicLong rowReads = new AtomicLong(0);
+  public AtomicLong rowWrites = new AtomicLong(0);
+  public AtomicLong recordReads = new AtomicLong(0);
+  public AtomicLong recordWrites = new AtomicLong(0);
+  public AtomicLong queriesExternal = new AtomicLong(0);
+  public AtomicLong queriesInternal = new AtomicLong(0);
+  public AtomicLong shardBuffercacheAllocate1024 = new AtomicLong(0);
+  public AtomicLong shardBuffercacheAllocate8192 = new AtomicLong(0);
+  public AtomicLong shardBuffercacheAllocateOther = new AtomicLong(0);
+  public AtomicLong shardBuffercacheLost = new AtomicLong(0);
+  public Map<String,MethodCall> methodCalls = new ConcurrentHashMap<String, MethodCall>();
+  
+  public AtomicLong tableCount = new AtomicLong(0);
+  public AtomicLong rowCount = new AtomicLong(0);
+  public AtomicLong recordCount = new AtomicLong(0);
+  public AtomicLong indexCount = new AtomicLong(0);
+  public AtomicLong indexMemoryUsage = new AtomicLong(0);
+  public AtomicLong segmentCount = new AtomicLong(0);
+
+  private MetricsRecord metricsRecord;
+  private long previous = System.nanoTime();
+
+  // ad-hoc smoke test: emits fake cache traffic so the updater output can be observed
+  public static void main(String[] args) throws InterruptedException {
+    Configuration conf = new Configuration();
+    Metrics metrics = new Metrics(conf);
+    MethodCall methodCall = new MethodCall();
+    metrics.methodCalls.put("test",methodCall);
+    for (int i = 0; i < 100; i++) {
+      metrics.blockCacheHit.incrementAndGet();
+      metrics.blockCacheMiss.incrementAndGet();
+      methodCall.invokes.incrementAndGet();
+      methodCall.times.addAndGet(56000000);
+      Thread.sleep(500);
+    }
+  }
+
+  public Metrics(Configuration conf) {
+    JvmMetrics.init("blockcache", Long.toString(System.currentTimeMillis()));
+    MetricsContext metricsContext = MetricsUtil.getContext("blockcache");
+    metricsRecord = MetricsUtil.createRecord(metricsContext, "metrics");
+    metricsContext.registerUpdater(this);
+  }
+
+  @Override
+  public void doUpdates(MetricsContext context) {
+    synchronized (this) {
+      long now = System.nanoTime();
+      float seconds = (now - previous) / 1000000000.0f;
+      metricsRecord.setMetric("blockcache.hit", getPerSecond(blockCacheHit.getAndSet(0), seconds));
+      metricsRecord.setMetric("blockcache.miss", getPerSecond(blockCacheMiss.getAndSet(0), seconds));
+      metricsRecord.setMetric("blockcache.eviction", getPerSecond(blockCacheEviction.getAndSet(0), seconds));
+      metricsRecord.setMetric("blockcache.size", blockCacheSize.get());
+      metricsRecord.setMetric("row.reads", getPerSecond(rowReads.getAndSet(0), seconds));
+      metricsRecord.setMetric("row.writes", getPerSecond(rowWrites.getAndSet(0), seconds));
+      metricsRecord.setMetric("record.reads", getPerSecond(recordReads.getAndSet(0), seconds));
+      metricsRecord.setMetric("record.writes", getPerSecond(recordWrites.getAndSet(0), seconds));
+      metricsRecord.setMetric("query.external", getPerSecond(queriesExternal.getAndSet(0), seconds));
+      metricsRecord.setMetric("query.internal", getPerSecond(queriesInternal.getAndSet(0), seconds));
+      for (Entry<String,MethodCall> entry : methodCalls.entrySet()) {
+        String key = entry.getKey();
+        MethodCall value = entry.getValue();
+        long invokes = value.invokes.getAndSet(0);
+        long times = value.times.getAndSet(0);
+        
+        // avoid 0/0 producing NaN when a method saw no invocations this period
+        float avgTimes = invokes == 0 ? 0f : (times / (float) invokes) / 1000000000.0f;
+        metricsRecord.setMetric("methodcalls." + key + ".count", getPerSecond(invokes, seconds));
+        metricsRecord.setMetric("methodcalls." + key + ".time", avgTimes);
+      }
+      metricsRecord.setMetric("tables", tableCount.get());
+      metricsRecord.setMetric("rows", rowCount.get());
+      metricsRecord.setMetric("records", recordCount.get());
+      metricsRecord.setMetric("index.count", indexCount.get());
+      metricsRecord.setMetric("index.memoryusage", indexMemoryUsage.get());
+      metricsRecord.setMetric("index.segments", segmentCount.get());
+      previous = now;
+    }
+    metricsRecord.update();
+  }
+
+  private float getPerSecond(long value, float seconds) {
+    return value / seconds;
+  }
+
+}
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java b/solr/core/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java
new file mode 100644
index 0000000..52b68dd
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/ReusedBufferedIndexOutput.java
@@ -0,0 +1,178 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.IndexOutput;
+
+public abstract class ReusedBufferedIndexOutput extends IndexOutput {
+  
+  public static final int BUFFER_SIZE = 1024;
+  
+  private int bufferSize = BUFFER_SIZE;
+  
+  protected byte[] buffer;
+  
+  /** position in the file of buffer */
+  private long bufferStart = 0;
+  /** end of valid bytes */
+  private int bufferLength = 0;
+  /** next byte to write */
+  private int bufferPosition = 0;
+  /** total length of the file */
+  private long fileLength = 0;
+  
+  public ReusedBufferedIndexOutput() {
+    this(BUFFER_SIZE);
+  }
+  
+  public ReusedBufferedIndexOutput(int bufferSize) {
+    checkBufferSize(bufferSize);
+    this.bufferSize = bufferSize;
+    buffer = BufferStore.takeBuffer(this.bufferSize);
+  }
+  
+  protected long getBufferStart() {
+    return bufferStart;
+  }
+  
+  private void checkBufferSize(int bufferSize) {
+    if (bufferSize <= 0) throw new IllegalArgumentException(
+        "bufferSize must be greater than 0 (got " + bufferSize + ")");
+  }
+  
+  /** Write the buffered bytes to cache */
+  private void flushBufferToCache() throws IOException {
+    writeInternal(buffer, 0, bufferLength);
+    
+    bufferStart += bufferLength;
+    bufferLength = 0;
+    bufferPosition = 0;
+  }
+  
+  protected abstract void flushInternal() throws IOException;
+  
+  @Override
+  public void flush() throws IOException {
+    flushBufferToCache();
+    flushInternal();
+  }
+  
+  protected abstract void closeInternal() throws IOException;
+  
+  @Override
+  public void close() throws IOException {
+    flushBufferToCache();
+    closeInternal();
+    BufferStore.putBuffer(buffer);
+    buffer = null;
+  }
+  
+  @Override
+  public long getFilePointer() {
+    return bufferStart + bufferPosition;
+  }
+  
+  protected abstract void seekInternal(long pos) throws IOException;
+  
+  @Override
+  public long length() throws IOException {
+    return fileLength;
+  }
+  
+  @Override
+  public void writeByte(byte b) throws IOException {
+    if (bufferPosition >= bufferSize) {
+      flushBufferToCache();
+    }
+    if (getFilePointer() >= fileLength) {
+      fileLength++;
+    }
+    buffer[bufferPosition++] = b;
+    if (bufferPosition > bufferLength) {
+      bufferLength = bufferPosition;
+    }
+  }
+  
+  /**
+   * Expert: implements buffer flushing to cache. Writes bytes to the current
+   * position in the output.
+   * 
+   * @param b
+   *          the array of bytes to write
+   * @param offset
+   *          the offset in the array of bytes to write
+   * @param length
+   *          the number of bytes to write
+   */
+  protected abstract void writeInternal(byte[] b, int offset, int length)
+      throws IOException;
+  
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
+    if (getFilePointer() + length > fileLength) {
+      fileLength = getFilePointer() + length;
+    }
+    if (length <= bufferSize - bufferPosition) {
+      // the buffer contains enough space to satisfy this request
+      if (length > 0) { // to allow b to be null if length is 0...
+        System.arraycopy(b, offset, buffer, bufferPosition, length);
+      }
+      bufferPosition += length;
+      if (bufferPosition > bufferLength) {
+        bufferLength = bufferPosition;
+      }
+    } else {
+      // the buffer does not have enough space. First buffer all we've got.
+      int available = bufferSize - bufferPosition;
+      if (available > 0) {
+        System.arraycopy(b, offset, buffer, bufferPosition, available);
+        offset += available;
+        length -= available;
+        bufferPosition = bufferSize;
+        bufferLength = bufferSize;
+      }
+      
+      flushBufferToCache();
+      
+      // and now, write the remaining 'length' bytes:
+      if (length < bufferSize) {
+        // If the amount left to write is small enough do it in the usual
+        // buffered way:
+        System.arraycopy(b, offset, buffer, 0, length);
+        bufferPosition = length;
+        bufferLength = length;
+      } else {
+        // The amount left to write is larger than the buffer
+        // there's no performance reason not to write it all
+        // at once.
+        writeInternal(b, offset, length);
+        bufferStart += length;
+        bufferPosition = 0;
+        bufferLength = 0;
+      }
+      
+    }
+  }
+  
+  @Override
+  protected Object clone() throws CloneNotSupportedException {
+    throw new CloneNotSupportedException();
+  }
+}
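+
+// A sketch of a minimal concrete subclass: the base class buffers writes
+// and recycles its byte[] through BufferStore; an implementation only
+// supplies the raw write/flush/close path. The stream-backed class below
+// is hypothetical.
+//
+//   class StreamIndexOutput extends ReusedBufferedIndexOutput {
+//     private final java.io.OutputStream out;
+//     StreamIndexOutput(java.io.OutputStream out) { this.out = out; }
+//     @Override protected void writeInternal(byte[] b, int off, int len)
+//         throws java.io.IOException { out.write(b, off, len); }
+//     @Override protected void flushInternal() throws java.io.IOException { out.flush(); }
+//     @Override protected void closeInternal() throws java.io.IOException { out.close(); }
+//     @Override protected void seekInternal(long pos) { /* append-only */ }
+//   }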
diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/package.html b/solr/core/src/java/org/apache/solr/store/blockcache/package.html
new file mode 100644
index 0000000..a5328c2
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/blockcache/package.html
@@ -0,0 +1,29 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+<head>
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
+<body>
+<p>
+An HDFS blockcache implementation.
+</p>
+</body>
+</html>
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
new file mode 100644
index 0000000..47c0230
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java
@@ -0,0 +1,262 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.solr.store.blockcache.CustomBufferedIndexInput;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsDirectory extends Directory {
+  public static Logger LOG = LoggerFactory.getLogger(HdfsDirectory.class);
+  
+  public static final int BUFFER_SIZE = 8192;
+  
+  private static final String LF_EXT = ".lf";
+  protected static final String SEGMENTS_GEN = "segments.gen";
+  protected static final IndexOutput NULL_WRITER = new NullIndexOutput();
+  protected Path hdfsDirPath;
+  protected Configuration configuration;
+  
+  private final FileSystem fileSystem;
+  
+  public HdfsDirectory(Path hdfsDirPath, Configuration configuration)
+      throws IOException {
+    assert hdfsDirPath.toString().startsWith("hdfs:/") : hdfsDirPath.toString();
+    setLockFactory(NoLockFactory.getNoLockFactory());
+    this.hdfsDirPath = hdfsDirPath;
+    this.configuration = configuration;
+    fileSystem = FileSystem.newInstance(hdfsDirPath.toUri(), configuration);
+    try {
+      if (!fileSystem.exists(hdfsDirPath)) {
+        fileSystem.mkdirs(hdfsDirPath);
+      }
+    } catch (Exception e) {
+      IOUtils.closeQuietly(fileSystem);
+      throw new RuntimeException("Problem creating directory: " + hdfsDirPath,
+          e);
+    }
+  }
+  
+  @Override
+  public void close() throws IOException {
+    LOG.info("Closing hdfs directory {}", hdfsDirPath);
+    fileSystem.close();
+  }
+  
+  @Override
+  public IndexOutput createOutput(String name, IOContext context)
+      throws IOException {
+    if (SEGMENTS_GEN.equals(name)) {
+      return NULL_WRITER;
+    }
+    HdfsFileWriter writer = new HdfsFileWriter(getFileSystem(), new Path(
+        hdfsDirPath, name));
+    return new HdfsIndexOutput(writer);
+  }
+  
+  private String[] getNormalNames(List<String> files) {
+    int size = files.size();
+    for (int i = 0; i < size; i++) {
+      String str = files.get(i);
+      files.set(i, toNormalName(str));
+    }
+    return files.toArray(new String[] {});
+  }
+  
+  private String toNormalName(String name) {
+    if (name.endsWith(LF_EXT)) {
+      return name.substring(0, name.length() - LF_EXT.length());
+    }
+    return name;
+  }
+  
+  @Override
+  public IndexInput openInput(String name, IOContext context)
+      throws IOException {
+    return openInput(name, BUFFER_SIZE);
+  }
+  
+  private IndexInput openInput(String name, int bufferSize) throws IOException {
+    // pass the requested buffer size through instead of ignoring it
+    return new HdfsNormalIndexInput(name, getFileSystem(), new Path(
+        hdfsDirPath, name), bufferSize);
+  }
+  
+  @Override
+  public void deleteFile(String name) throws IOException {
+    Path path = new Path(hdfsDirPath, name);
+    LOG.debug("Deleting {}", path);
+    getFileSystem().delete(path, false);
+  }
+  
+  @Override
+  public boolean fileExists(String name) throws IOException {
+    return getFileSystem().exists(new Path(hdfsDirPath, name));
+  }
+  
+  @Override
+  public long fileLength(String name) throws IOException {
+    return HdfsFileReader.getLength(getFileSystem(),
+        new Path(hdfsDirPath, name));
+  }
+  
+  public long fileModified(String name) throws IOException {
+    FileStatus fileStatus = getFileSystem().getFileStatus(
+        new Path(hdfsDirPath, name));
+    return fileStatus.getModificationTime();
+  }
+  
+  @Override
+  public String[] listAll() throws IOException {
+    FileStatus[] listStatus = getFileSystem().listStatus(hdfsDirPath);
+    List<String> files = new ArrayList<String>();
+    if (listStatus == null) {
+      return new String[] {};
+    }
+    for (FileStatus status : listStatus) {
+      if (!status.isDirectory()) {
+        files.add(status.getPath().getName());
+      }
+    }
+    return getNormalNames(files);
+  }
+  
+  public Path getHdfsDirPath() {
+    return hdfsDirPath;
+  }
+  
+  public FileSystem getFileSystem() {
+    return fileSystem;
+  }
+  
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+  
+  static class HdfsNormalIndexInput extends CustomBufferedIndexInput {
+    public static Logger LOG = LoggerFactory
+        .getLogger(HdfsNormalIndexInput.class);
+    
+    private final Path path;
+    private final FSDataInputStream inputStream;
+    private final long length;
+    private boolean clone = false;
+    
+    public HdfsNormalIndexInput(String name, FileSystem fileSystem, Path path,
+        int bufferSize) throws IOException {
+      super(name);
+      this.path = path;
+      LOG.debug("Opening normal index input on {}", path);
+      FileStatus fileStatus = fileSystem.getFileStatus(path);
+      length = fileStatus.getLen();
+      inputStream = fileSystem.open(path, bufferSize);
+    }
+    
+    @Override
+    protected void readInternal(byte[] b, int offset, int length)
+        throws IOException {
+      // readFully retries short reads; a bare read() can return fewer bytes
+      inputStream.readFully(getFilePointer(), b, offset, length);
+    }
+    
+    @Override
+    protected void seekInternal(long pos) throws IOException {
+      inputStream.seek(pos);
+    }
+    
+    @Override
+    protected void closeInternal() throws IOException {
+      LOG.debug("Closing normal index input on {}", path);
+      if (!clone) {
+        inputStream.close();
+      }
+    }
+    
+    @Override
+    public long length() {
+      return length;
+    }
+    
+    @Override
+    public IndexInput clone() {
+      HdfsNormalIndexInput clone = (HdfsNormalIndexInput) super.clone();
+      clone.clone = true;
+      return clone;
+    }
+  }
+  
+  static class HdfsIndexOutput extends IndexOutput {
+    
+    private HdfsFileWriter writer;
+    
+    public HdfsIndexOutput(HdfsFileWriter writer) {
+      this.writer = writer;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      writer.close();
+    }
+    
+    @Override
+    public void flush() throws IOException {
+      writer.flush();
+    }
+    
+    @Override
+    public long getFilePointer() {
+      return writer.getPosition();
+    }
+    
+    @Override
+    public long length() {
+      return writer.length();
+    }
+    
+    @Override
+    public void writeByte(byte b) throws IOException {
+      writer.writeByte(b);
+    }
+    
+    @Override
+    public void writeBytes(byte[] b, int offset, int length) throws IOException {
+      writer.writeBytes(b, offset, length);
+    }
+  }
+  
+  @Override
+  public void sync(Collection<String> names) throws IOException {
+    LOG.debug("Sync called on {}", Arrays.toString(names.toArray()));
+  }
+  
+}
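+
+// A minimal wiring sketch (URI and file name are illustrative): the
+// directory is addressed by an hdfs:// path and installs NoLockFactory, so
+// index locking must come from elsewhere (e.g. HdfsLockFactory below).
+//
+//   Configuration conf = new Configuration();
+//   Directory dir = new HdfsDirectory(
+//       new Path("hdfs://namenode:8020/solr/core1/index"), conf);
+//   IndexOutput out = dir.createOutput("test.bin", IOContext.DEFAULT);
+//   out.writeBytes(new byte[] {1, 2, 3}, 0, 3);
+//   out.close();
+//   IndexInput in = dir.openInput("test.bin", IOContext.DEFAULT);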
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileReader.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileReader.java
new file mode 100644
index 0000000..8a53793
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileReader.java
@@ -0,0 +1,102 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.DataInput;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsFileReader extends DataInput {
+  
+  public static Logger LOG = LoggerFactory.getLogger(HdfsFileReader.class);
+  
+  private final Path path;
+  private FSDataInputStream inputStream;
+  private long length;
+  private boolean isClone;
+  
+  public HdfsFileReader(FileSystem fileSystem, Path path, int bufferSize)
+      throws IOException {
+    this.path = path;
+    LOG.debug("Opening reader on {}", path);
+    if (!fileSystem.exists(path)) {
+      throw new FileNotFoundException(path.toString());
+    }
+    inputStream = fileSystem.open(path, bufferSize);
+    FileStatus fileStatus = fileSystem.getFileStatus(path);
+    length = fileStatus.getLen();
+  }
+  
+  public HdfsFileReader(FileSystem fileSystem, Path path) throws IOException {
+    this(fileSystem, path, HdfsDirectory.BUFFER_SIZE);
+  }
+  
+  public long length() {
+    return length;
+  }
+  
+  public void seek(long pos) throws IOException {
+    inputStream.seek(pos);
+  }
+  
+  public void close() throws IOException {
+    if (!isClone) {
+      inputStream.close();
+    }
+    LOG.debug("Closing reader on {}", path);
+  }
+  
+  /**
+   * This method should never be used; it issues an unbuffered single-byte
+   * read against HDFS on every call.
+   */
+  @Override
+  public byte readByte() throws IOException {
+    LOG.warn("Should not be used!");
+    return inputStream.readByte();
+  }
+  
+  @Override
+  public void readBytes(byte[] b, int offset, int len) throws IOException {
+    while (len > 0) {
+      int lenRead = inputStream.read(b, offset, len);
+      if (lenRead < 0) {
+        throw new EOFException("read past EOF: " + path);
+      }
+      offset += lenRead;
+      len -= lenRead;
+    }
+  }
+  
+  public static long getLength(FileSystem fileSystem, Path path)
+      throws IOException {
+    FileStatus fileStatus = fileSystem.getFileStatus(path);
+    return fileStatus.getLen();
+  }
+  
+  @Override
+  public DataInput clone() {
+    HdfsFileReader reader = (HdfsFileReader) super.clone();
+    reader.isClone = true;
+    return reader;
+  }
+  
+}
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
new file mode 100644
index 0000000..941f9f6
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsFileWriter.java
@@ -0,0 +1,95 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.lucene.store.DataOutput;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HdfsFileWriter extends DataOutput {
+  public static Logger LOG = LoggerFactory.getLogger(HdfsFileWriter.class);
+  
+  public static final String HDFS_SYNC_BLOCK = "solr.hdfs.sync.block";
+  
+  private final Path path;
+  private FSDataOutputStream outputStream;
+  private long currentPosition;
+  
+  public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
+    LOG.debug("Creating writer on {}", path);
+    this.path = path;
+    
+    Configuration conf = fileSystem.getConf();
+    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE,
+        CreateFlag.OVERWRITE);
+    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
+      flags.add(CreateFlag.SYNC_BLOCK);
+    }
+    outputStream = fileSystem.create(path, FsPermission.getDefault()
+        .applyUMask(FsPermission.getUMask(conf)), flags, fsDefaults
+        .getFileBufferSize(), fsDefaults.getReplication(), fsDefaults
+        .getBlockSize(), null);
+  }
+  
+  public long length() {
+    return currentPosition;
+  }
+  
+  public void seek(long pos) throws IOException {
+    LOG.error("Invalid seek called on {}", path);
+    throw new IOException("Seek not supported");
+  }
+  
+  public void flush() throws IOException {
+    // flush to the network; no guarantee it reaches the datanodes (use hflush for that)
+    outputStream.flush();
+    LOG.debug("Flushed file {}", path);
+  }
+  
+  public void close() throws IOException {
+    outputStream.close();
+    LOG.debug("Closed writer on {}", path);
+  }
+  
+  @Override
+  public void writeByte(byte b) throws IOException {
+    outputStream.write(b & 0xFF);
+    currentPosition++;
+  }
+  
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
+    outputStream.write(b, offset, length);
+    currentPosition += length;
+  }
+  
+  public long getPosition() {
+    return currentPosition;
+  }
+}
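+
+// Note that HDFS_SYNC_BLOCK is read with Boolean.getBoolean, i.e. from a
+// JVM system property rather than from the Hadoop Configuration, so it is
+// enabled at startup:
+//
+//   java -Dsolr.hdfs.sync.block=true ...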
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
new file mode 100644
index 0000000..ecf113a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsLockFactory.java
@@ -0,0 +1,138 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.LockReleaseFailedException;
+import org.apache.solr.util.IOUtils;
+
+public class HdfsLockFactory extends LockFactory {
+  
+  private Path lockPath;
+  private Configuration configuration;
+  
+  public HdfsLockFactory(Path lockPath, Configuration configuration) {
+    this.lockPath = lockPath;
+    this.configuration = configuration;
+  }
+  
+  @Override
+  public Lock makeLock(String lockName) {
+    
+    if (lockPrefix != null) {
+      lockName = lockPrefix + "-" + lockName;
+    }
+    
+    HdfsLock lock = new HdfsLock(lockPath, lockName, configuration);
+    
+    return lock;
+  }
+  
+  @Override
+  public void clearLock(String lockName) throws IOException {
+    FileSystem fs = null;
+    try {
+      fs = FileSystem.newInstance(lockPath.toUri(), configuration);
+      
+      if (fs.exists(lockPath)) {
+        if (lockPrefix != null) {
+          lockName = lockPrefix + "-" + lockName;
+        }
+        
+        Path lockFile = new Path(lockPath, lockName);
+
+        if (fs.exists(lockFile) && !fs.delete(lockFile, false)) {
+          throw new IOException("Cannot delete " + lockFile);
+        }
+      }
+    } finally {
+      IOUtils.closeQuietly(fs);
+    }
+  }
+  
+  public Path getLockPath() {
+    return lockPath;
+  }
+  
+  public void setLockPath(Path lockPath) {
+    this.lockPath = lockPath;
+  }
+  
+  static class HdfsLock extends Lock {
+    
+    private Path lockPath;
+    private String lockName;
+    private Configuration conf;
+    
+    public HdfsLock(Path lockPath, String lockName, Configuration conf) {
+      this.lockPath = lockPath;
+      this.lockName = lockName;
+      this.conf = conf;
+    }
+    
+    @Override
+    public boolean obtain() throws IOException {
+      FSDataOutputStream file = null;
+      FileSystem fs = null;
+      try {
+        fs = FileSystem.newInstance(lockPath.toUri(), conf);
+        
+        file = fs.create(new Path(lockPath, lockName), false);
+      } catch (IOException e) {
+        return false;
+      } finally {
+        IOUtils.closeQuietly(file);
+        IOUtils.closeQuietly(fs);
+      }
+      return true;
+    }
+    
+    @Override
+    public void release() throws IOException {
+      FileSystem fs = FileSystem.newInstance(lockPath.toUri(), conf);
+      try {
+        if (fs.exists(new Path(lockPath, lockName))
+            && !fs.delete(new Path(lockPath, lockName), false)) throw new LockReleaseFailedException(
+            "failed to delete " + new Path(lockPath, lockName));
+      } finally {
+        IOUtils.closeQuietly(fs);
+      }
+    }
+    
+    @Override
+    public boolean isLocked() throws IOException {
+      boolean isLocked = false;
+      FileSystem fs = FileSystem.newInstance(lockPath.toUri(), conf);
+      try {
+        isLocked = fs.exists(new Path(lockPath, lockName));
+      } finally {
+        IOUtils.closeQuietly(fs);
+      }
+      return isLocked;
+    }
+    
+  }
+  
+}
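+
+// A minimal usage sketch (paths are illustrative): obtain() relies on
+// fs.create(path, false) failing when the lock file already exists, so a
+// second obtain() on the same name returns false until release() deletes it.
+//
+//   HdfsLockFactory factory = new HdfsLockFactory(
+//       new Path("hdfs://namenode:8020/solr/core1/locks"), new Configuration());
+//   Lock lock = factory.makeLock("write.lock");
+//   if (lock.obtain()) {
+//     try { /* ... exclusive section ... */ } finally { lock.release(); }
+//   }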
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java b/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java
new file mode 100644
index 0000000..0295385
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/NullIndexOutput.java
@@ -0,0 +1,66 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.IndexOutput;
+
+public class NullIndexOutput extends IndexOutput {
+  
+  private long pos;
+  private long length;
+  
+  @Override
+  public void close() throws IOException {
+    // no-op
+  }
+  
+  @Override
+  public void flush() throws IOException {
+    // no-op
+  }
+  
+  @Override
+  public long getFilePointer() {
+    return pos;
+  }
+  
+  @Override
+  public long length() throws IOException {
+    return length;
+  }
+  
+  @Override
+  public void writeByte(byte b) throws IOException {
+    pos++;
+    updateLength(); // keep length in step with single-byte writes too
+  }
+  
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) throws IOException {
+    pos += length;
+    updateLength();
+  }
+  
+  private void updateLength() {
+    if (pos > length) {
+      length = pos;
+    }
+  }
+  
+}
diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/package.html b/solr/core/src/java/org/apache/solr/store/hdfs/package.html
new file mode 100644
index 0000000..cfe928e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/store/hdfs/package.html
@@ -0,0 +1,29 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+<head>
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
+<body>
+<p>
+An HDFS Directory implementation.
+</p>
+</body>
+</html>
diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
index 2b34544..ef0a2a9 100644
--- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@@ -187,6 +187,76 @@
       }
     }
   }
+  
+  @Override
+  public synchronized void closeIndexWriter(SolrCore core, boolean rollback)
+      throws IOException {
+    log.info("Closing IndexWriter...");
+    String coreName = core.getName();
+    synchronized (writerPauseLock) {
+      if (closed) {
+        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Already closed");
+      }
+      
+      // we need to wait for the Writer to fall out of use
+      // first let's stop it from being lent out
+      pauseWriter = true;
+      // then let's wait until it's out of use
+      log.info("Waiting until IndexWriter is unused... core=" + coreName);
+      
+      while (!writerFree) {
+        try {
+          writerPauseLock.wait(100);
+        } catch (InterruptedException e) {}
+        
+        if (closed) {
+          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+              "SolrCoreState already closed");
+        }
+      }
+      
+      if (indexWriter != null) {
+        if (!rollback) {
+          try {
+            log.info("Closing old IndexWriter... core=" + coreName);
+            indexWriter.close();
+          } catch (Throwable t) {
+            SolrException.log(log, "Error closing old IndexWriter. core="
+                + coreName, t);
+          }
+        } else {
+          try {
+            log.info("Rollback old IndexWriter... core=" + coreName);
+            indexWriter.rollback();
+          } catch (Throwable t) {
+            SolrException.log(log, "Error rolling back old IndexWriter. core="
+                + coreName, t);
+          }
+        }
+      }
+      
+    }
+  }
+  
+  @Override
+  public synchronized void openIndexWriter(SolrCore core) throws IOException {
+    log.info("Creating new IndexWriter...");
+    synchronized (writerPauseLock) {
+      if (closed) {
+        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Already closed");
+      }
+      
+      try {
+        indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2");
+        log.info("New IndexWriter is ready to be used.");
+        // we need to null this so it picks up the new writer next get call
+        refCntWriter = null;
+      } finally {
+        pauseWriter = false;
+        writerPauseLock.notifyAll();
+      }
+    }
+  }
 
   @Override
   public synchronized void rollbackIndexWriter(SolrCore core) throws IOException {
diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
index 9fc36b4..e13a41e 100644
--- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
+++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@@ -108,6 +108,8 @@
     softCommitTracker = new CommitTracker("Soft", core, softCommitDocsUpperBound, softCommitTimeUpperBound, true, true);
     
     commitWithinSoftCommit = updateHandlerInfo.commitWithinSoftCommit;
+
+
   }
   
   public DirectUpdateHandler2(SolrCore core, UpdateHandler updateHandler) {
@@ -125,6 +127,13 @@
     softCommitTracker = new CommitTracker("Soft", core, softCommitDocsUpperBound, softCommitTimeUpperBound, updateHandlerInfo.openSearcher, true);
     
     commitWithinSoftCommit = updateHandlerInfo.commitWithinSoftCommit;
+
+    UpdateLog existingLog = updateHandler.getUpdateLog();
+    if (this.ulog != null && this.ulog == existingLog) {
+      // If we are reusing the existing update log, inform the log that its update handler has changed.
+      // We do this as late as possible.
+      this.ulog.init(this, core);
+    }
   }
 
   private void deleteAll() throws IOException {
@@ -531,11 +540,17 @@
           }
           
           // SolrCore.verbose("writer.commit() start writer=",writer);
-          final Map<String,String> commitData = new HashMap<String,String>();
-          commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY,
-              String.valueOf(System.currentTimeMillis()));
-          writer.setCommitData(commitData);
-          writer.commit();
+
+          if (writer.hasUncommittedChanges()) {
+            final Map<String,String> commitData = new HashMap<String,String>();
+            commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY,
+                String.valueOf(System.currentTimeMillis()));
+            writer.setCommitData(commitData);
+            writer.commit();
+          } else {
+            log.info("No uncommitted changes. Skipping IW.commit.");
+          }
+
           // SolrCore.verbose("writer.commit() end");
           numDocsPending.set(0);
           callPostCommitCallbacks();
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
new file mode 100644
index 0000000..935774d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
@@ -0,0 +1,581 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.DataInputInputStream;
+import org.apache.solr.common.util.FastInputStream;
+import org.apache.solr.common.util.FastOutputStream;
+import org.apache.solr.common.util.JavaBinCodec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ *  Log Format: List{Operation, Version, ...}
+ *  ADD, VERSION, DOC
+ *  DELETE, VERSION, ID_BYTES
+ *  DELETE_BY_QUERY, VERSION, String
+ *
+ *  TODO: keep two files, one for [operation, version, id] and the other for the actual
+ *  document data.  That way we could throw away document log files more readily
+ *  while retaining the smaller operation log files longer (and we can retrieve
+ *  the stored fields from the latest documents from the index).
+ *
+ *  This would require keeping all source fields stored of course.
+ *
+ *  This would also allow to not log document data for requests with commit=true
+ *  in them (since we know that if the request succeeds, all docs will be committed)
+ *
+ */
+public class HdfsTransactionLog extends TransactionLog {
+  public static Logger log = LoggerFactory.getLogger(HdfsTransactionLog.class);
+
+
+  Path tlogFile;
+
+  
+  private FSDataOutputStream tlogOutStream;
+  private FileSystem fs;
+
+  HdfsTransactionLog(FileSystem fs, Path tlogFile, Collection<String> globalStrings) {
+    this(fs, tlogFile, globalStrings, false);
+  }
+
+  HdfsTransactionLog(FileSystem fs, Path tlogFile, Collection<String> globalStrings, boolean openExisting) {
+    super();
+    boolean success = false;
+    this.fs = fs;
+
+    try {
+      if (debug) {
+        //log.debug("New TransactionLog file=" + tlogFile + ", exists=" + tlogFile.exists() + ", size=" + tlogFile.length() + ", openExisting=" + openExisting);
+      }
+      this.tlogFile = tlogFile;
+      
+      // TODO: look into forcefully taking over any lease
+      if (fs.exists(tlogFile) && openExisting) {
+        tlogOutStream = fs.append(tlogFile);
+      } else {
+        fs.delete(tlogFile, false);
+        
+        tlogOutStream = fs.create(tlogFile, (short)1);
+        tlogOutStream.hsync();
+      }
+
+      fos = new FastOutputStream(tlogOutStream, new byte[65536], 0);
+      long start = tlogOutStream.getPos(); 
+
+      if (openExisting) {
+        if (start > 0) {
+          readHeader(null);
+          
+         // we should already be at the end 
+         // raf.seek(start);
+
+        //  assert channel.position() == start;
+          fos.setWritten(start);    // reflect that we aren't starting at the beginning
+          //assert fos.size() == channel.size();
+        } else {
+          addGlobalStrings(globalStrings);
+        }
+      } else {
+        if (start > 0) {
+          log.error("New transaction log already exists:" + tlogFile + " size=" + tlogOutStream.size());
+        }
+
+        addGlobalStrings(globalStrings);
+      }
+
+      success = true;
+
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    } finally {
+      if (!success && tlogOutStream != null) {
+        try {
+          tlogOutStream.close();
+        } catch (Exception e) {
+          log.error("Error closing tlog file (after error opening)", e);
+        }
+      }
+    }
+  }
+
+  @Override
+  public boolean endsWithCommit() throws IOException {
+    long size;
+    synchronized (this) {
+      fos.flush();
+      tlogOutStream.hflush();
+      size = fos.size();
+    }
+
+    
+    // the end of the file should have the end message (added during a commit) plus a 4 byte size
+    byte[] buf = new byte[ END_MESSAGE.length() ];
+    long pos = size - END_MESSAGE.length() - 4;
+    if (pos < 0) return false;
+    
+    FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile), pos);
+    try {
+      //ChannelFastInputStream is = new ChannelFastInputStream(channel, pos);
+      dis.read(buf);
+      for (int i = 0; i < buf.length; i++) {
+        if (buf[i] != END_MESSAGE.charAt(i)) return false;
+      }
+    } finally {
+      dis.close();
+    }
+    return true;
+  }
+  
+  // This could mess with any readers or reverse readers that are open, or anything that might try to do a log lookup.
+  // This should only be used to roll back buffered updates, not actually applied updates.
+  @Override
+  public void rollback(long pos) throws IOException {
+    synchronized (this) {
+      assert snapshot_size == pos;
+      fos.flush();
+      tlogOutStream.hflush();
+      // TODO: how do we rollback with hdfs?? We need HDFS-3107
+      //raf.setLength(pos);
+      fos.setWritten(pos);
+      assert fos.size() == pos;
+      numRecords = snapshot_numRecords;
+    }
+  }
+
+  private void readHeader(FastInputStream fis) throws IOException {
+    // read existing header
+    boolean closeFis = (fis == null);
+    if (closeFis) {
+      fis = new FSDataFastInputStream(fs.open(tlogFile), 0);
+    }
+    Map header = null;
+    try {
+      LogCodec codec = new LogCodec(resolver);
+      header = (Map) codec.unmarshal(fis);
+      
+      fis.readInt(); // skip size
+    } finally {
+      if (fis != null && closeFis) {
+        fis.close();
+      }
+    }
+    // needed to read other records
+
+    synchronized (this) {
+      globalStringList = (List<String>)header.get("strings");
+      globalStringMap = new HashMap<String, Integer>(globalStringList.size());
+      for (int i=0; i<globalStringList.size(); i++) {
+        globalStringMap.put( globalStringList.get(i), i+1);
+      }
+    }
+  }
+
+  @Override
+  public long writeCommit(CommitUpdateCommand cmd, int flags) {
+    LogCodec codec = new LogCodec(resolver);
+    synchronized (this) {
+      try {
+        long pos = fos.size();   // if we had flushed, this should be equal to channel.position()
+
+        if (pos == 0) {
+          writeLogHeader(codec);
+          pos = fos.size();
+        }
+        
+        codec.init(fos);
+        codec.writeTag(JavaBinCodec.ARR, 3);
+        codec.writeInt(UpdateLog.COMMIT | flags);  // should just take one byte
+        codec.writeLong(cmd.getVersion());
+        codec.writeStr(END_MESSAGE);  // ensure these bytes are (almost) last in the file
+
+        endRecord(pos);
+        
+        fos.flush();  // flush since this will be the last record in a log fill
+        tlogOutStream.hflush();
+
+        //assert fos.size() == channel.size();
+
+        return pos;
+      } catch (IOException e) {
+        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /* This method is thread safe */
+  @Override
+  public Object lookup(long pos) {
+    // A negative position can result from a log replay (which does not re-log, but does
+    // update the version map).  This is OK since the node won't be ACTIVE when this happens.
+    if (pos < 0) return null;
+
+    try {
+      // make sure any unflushed buffer has been flushed
+      synchronized (this) {
+        // TODO: optimize this by keeping track of what we have flushed up to
+        fos.flushBuffer();
+        
+        // flush to hdfs
+        tlogOutStream.hflush();
+        /***
+         System.out.println("###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
+        if (fos.size() != raf.length() || pos >= fos.size() ) {
+          throw new RuntimeException("ERROR" + "###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos);
+        }
+        ***/
+      }
+
+      FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile),
+          pos);
+      try {
+        dis.seek(pos);
+        LogCodec codec = new LogCodec(resolver);
+        return codec.readVal(new FastInputStream(dis));
+      } finally {
+        dis.close();
+      }
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "pos=" + pos, e);
+    }
+  }
+
+  @Override
+  public void finish(UpdateLog.SyncLevel syncLevel) {
+    if (syncLevel == UpdateLog.SyncLevel.NONE) return;
+    try {
+      synchronized (this) {
+        fos.flushBuffer();
+        
+        // we must flush to hdfs
+        // TODO: we probably don't need to
+        // hsync below if we do this - I
+        // think they are equivalent.
+        tlogOutStream.hflush();
+      }
+
+      if (syncLevel == UpdateLog.SyncLevel.FSYNC) {
+        // Since fsync is outside of synchronized block, we can end up with a partial
+        // last record on power failure (which is OK, and does not represent an error...
+        // we just need to be aware of it when reading).
+        
+        //raf.getFD().sync();
+        tlogOutStream.hsync();
+      }
+
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
+  }
+  
+  @Override
+  protected void close() {
+    try {
+      if (debug) {
+        log.debug("Closing tlog" + this);
+      }
+
+      synchronized (this) {
+        fos.flush();
+        tlogOutStream.hflush();
+        fos.close();
+
+        tlogOutStream.close();
+      }
+
+      if (deleteOnClose) {
+        fs.delete(tlogFile, true);
+      }
+    } catch (IOException e) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    }
+  }
+
+  public String toString() {
+    return "hdfs tlog{file=" + tlogFile.toString() + " refcount=" + refcount.get() + "}";
+  }
+
+  /** Returns a reader that can be used while a log is still in use.
+   * Currently only *one* LogReader may be outstanding, and that log may only
+   * be used from a single thread. */
+  @Override
+  public LogReader getReader(long startingPos) {
+    return new HDFSLogReader(startingPos);
+  }
+
+  /** Returns a single threaded reverse reader */
+  @Override
+  public ReverseReader getReverseReader() throws IOException {
+    return new HDFSReverseReader();
+  }
+
+
+  public class HDFSLogReader extends LogReader{
+    FSDataFastInputStream fis;
+    private LogCodec codec = new LogCodec(resolver);
+
+    public HDFSLogReader(long startingPos) {
+      super();
+      incref();
+      try {
+        FSDataInputStream fdis = fs.open(tlogFile);
+        fis = new FSDataFastInputStream(fdis, startingPos);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    /** Returns the next object from the log, or null if none available.
+     *
+     * @return The log record, or null if EOF
+     * @throws IOException If there is a low-level I/O error.
+     */
+    public Object next() throws IOException, InterruptedException {
+      long pos = fis.position();
+
+
+      synchronized (HdfsTransactionLog.this) {
+        if (trace) {
+          log.trace("Reading log record.  pos="+pos+" currentSize="+fos.size());
+        }
+
+        if (pos >= fos.size()) {
+          return null;
+        }
+       
+        fos.flushBuffer();
+        tlogOutStream.hflush();
+        
+        // we actually need a new reader
+        fis.close();
+        try {
+          FSDataInputStream fdis = fs.open(tlogFile);
+          fis = new FSDataFastInputStream(fdis, pos);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+        
+      }
+      if (pos == 0) {
+        readHeader(fis);
+
+        // shouldn't currently happen - header and first record are currently written at the same time
+        synchronized (HdfsTransactionLog.this) {
+          if (fis.position() >= fos.size()) {
+            return null;
+          }
+          pos = fis.position();
+        }
+      }
+
+      tlogOutStream.hflush();
+      Object o = codec.readVal(fis);
+
+      // skip over record size
+      int size = fis.readInt();
+      assert size == fis.position() - pos - 4;
+
+      return o;
+    }
+
+    public void close() {
+      try {
+        fis.close();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+      decref();
+    }
+
+    @Override
+    public String toString() {
+      synchronized (HdfsTransactionLog.this) {
+        return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+      }
+    }
+
+  }
+
+  public class HDFSReverseReader extends ReverseReader {
+    FSDataFastInputStream fis;
+    private LogCodec codec = new LogCodec(resolver) {
+      @Override
+      public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) {
+        // Given that the SolrInputDocument is last in an add record, it's OK to just skip
+        // reading it completely.
+        return null;
+      }
+    };
+
+    int nextLength;  // length of the next record (the next one closer to the start of the log file)
+    long prevPos;    // where we started reading from last time (so prevPos - nextLength == start of next record)
+
+    public HDFSReverseReader() throws IOException {
+      incref();
+
+      long sz;
+      synchronized (HdfsTransactionLog.this) {
+        fos.flushBuffer();
+        
+        // this must be an hflush
+        tlogOutStream.hflush();
+        sz = fos.size();
+        //assert sz == channel.size();
+      }
+
+      fis = new FSDataFastInputStream(fs.open(tlogFile), 0);
+      
+      if (sz >= 4) {
+        // readHeader(fis);  // should not be needed
+        prevPos = sz - 4;
+        fis.seek(prevPos);
+        nextLength = fis.readInt();
+      }
+    }
+
+
+    /** Returns the next object from the log, or null if none available.
+     *
+     * @return The log record, or null if EOF
+     * @throws IOException If there is a low-level I/O error.
+     */
+    public Object next() throws IOException {
+      if (prevPos <= 0) return null;
+
+      long endOfThisRecord = prevPos;
+
+      int thisLength = nextLength;
+
+      long recordStart = prevPos - thisLength;  // back up to the beginning of the next record
+      prevPos = recordStart - 4;  // back up 4 more to read the length of the next record
+
+      if (prevPos <= 0) return null;  // this record is the header
+
+      long bufferPos = fis.getBufferPos();
+      if (prevPos >= bufferPos) {
+        // nothing to do... we're within the current buffer
+      } else {
+        // Position buffer so that this record is at the end.
+        // For small records, this will cause subsequent calls to next() to be within the buffer.
+        long seekPos =  endOfThisRecord - fis.getBufferSize();
+        seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger than the block size.
+        seekPos = Math.max(seekPos, 0);
+        fis.seek(seekPos);
+        fis.peek();  // cause buffer to be filled
+      }
+
+      fis.seek(prevPos);
+      nextLength = fis.readInt();     // this is the length of the *next* record (i.e. closer to the beginning)
+
+      // TODO: optionally skip document data
+      Object o = codec.readVal(fis);
+
+      // assert fis.position() == prevPos + 4 + thisLength;  // this is only true if we read all the data (and we currently skip reading SolrInputDocument)
+      return o;
+    }
+
+    /* returns the position in the log file of the last record returned by next() */
+    public long position() {
+      return prevPos + 4;  // skip the length
+    }
+
+    public void close() {
+      try {
+        fis.close();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+      decref();
+    }
+
+    @Override
+    public String toString() {
+      synchronized (HdfsTransactionLog.this) {
+        return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}";
+      }
+    }
+
+
+  }
+
+}
+
+
+
+class FSDataFastInputStream extends FastInputStream {
+  private FSDataInputStream fis;
+
+  public FSDataFastInputStream(FSDataInputStream fis, long chPosition) {
+    // super(null, new byte[10],0,0);    // a small buffer size for testing purposes
+    super(null);
+    this.fis = fis;
+    super.readFromStream = chPosition;
+  }
+
+  @Override
+  public int readWrappedStream(byte[] target, int offset, int len) throws IOException {
+    return fis.read(readFromStream, target, offset, len);
+  }
+
+  public void seek(long position) throws IOException {
+    if (position <= readFromStream && position >= getBufferPos()) {
+      // seek within buffer
+      pos = (int)(position - getBufferPos());
+    } else {
+      // long currSize = ch.size();   // not needed - underlying read should handle (unless read never done)
+      // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " on file of size " + currSize + " file=" + ch);
+      readFromStream = position;
+      end = pos = 0;
+    }
+    assert position() == position;
+  }
+
+  /** where is the start of the buffer relative to the whole file */
+  public long getBufferPos() {
+    return readFromStream - end;
+  }
+
+  public int getBufferSize() {
+    return buf.length;
+  }
+
+  @Override
+  public void close() throws IOException {
+    fis.close();
+  }
+  
+  @Override
+  public String toString() {
+    return "readFromStream="+readFromStream +" pos="+pos +" end="+end + " bufferPos="+getBufferPos() + " position="+position() ;
+  }
+}
+
+
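A worked example (assumed values, illustrative only) of the seek-within-buffer
arithmetic in FSDataFastInputStream above: readFromStream is the file offset
just past the bytes already pulled into the buffer, end counts the valid
buffered bytes, so getBufferPos() is where the buffer begins in the file:

    long readFromStream = 65536;            // file position after one 64KB read
    int end = 65536;                        // valid bytes currently buffered
    long bufferPos = readFromStream - end;  // 0: buffer covers file range [0, 65536)
    long target = 1024;
    if (target <= readFromStream && target >= bufferPos) {
      int pos = (int) (target - bufferPos); // 1024: repositioned with no I/O
    }
    // seeking to 70000 instead falls outside the buffer, so seek() sets
    // readFromStream = 70000 and end = pos = 0, forcing a fresh read next time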
diff --git a/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
new file mode 100644
index 0000000..e209c85
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/HdfsUpdateLog.java
@@ -0,0 +1,354 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.core.PluginInfo;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.util.HdfsUtil;
+import org.apache.solr.util.IOUtils;
+
+/** @lucene.experimental */
+public class HdfsUpdateLog extends UpdateLog {
+  
+  private FileSystem fs;
+  private Path tlogDir;
+  private String confDir;
+
+  public HdfsUpdateLog() {
+    
+  }
+  
+  public HdfsUpdateLog(String confDir) {
+    this.confDir = confDir;
+  }
+  
+  public FileSystem getFs() {
+    return fs;
+  }
+  
+  // HACK: while waiting for HDFS-3107, instead of quickly dropping buffered
+  // updates we slowly apply them. This is somewhat brittle, but current
+  // usage allows for it.
+  @Override
+  public boolean dropBufferedUpdates() {
+    Future<RecoveryInfo> future = applyBufferedUpdates();
+    if (future != null) {
+      try {
+        future.get();
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      } catch (ExecutionException e) {
+        throw new RuntimeException(e);
+      }
+    }
+    return true;
+  }
+  
+  @Override
+  public void init(PluginInfo info) {
+    dataDir = (String) info.initArgs.get("dir");
+    
+    defaultSyncLevel = SyncLevel.getSyncLevel((String) info.initArgs
+        .get("syncLevel"));
+    
+  }
+
+  private Configuration getConf() {
+    Configuration conf = new Configuration();
+    if (confDir != null) {
+      HdfsUtil.addHdfsResources(conf, confDir);
+    }
+    
+    return conf;
+  }
+  
+  @Override
+  public void init(UpdateHandler uhandler, SolrCore core) {
+    
+    // ulogDir from CoreDescriptor overrides
+    String ulogDir = core.getCoreDescriptor().getUlogDir();
+
+    if (ulogDir != null) {
+      dataDir = ulogDir;
+    }
+    if (dataDir == null || dataDir.length()==0) {
+      dataDir = core.getDataDir();
+    }
+    
+    if (!core.getDirectoryFactory().isAbsolute(dataDir)) {
+      try {
+        dataDir = core.getDirectoryFactory().getDataHome(core.getCoreDescriptor());
+      } catch (IOException e) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, e);
+      }
+    }
+    
+    try {
+      fs = FileSystem.newInstance(new Path(dataDir).toUri(), getConf());
+    } catch (IOException e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, e);
+    }
+    
+    this.uhandler = uhandler;
+    
+    if (dataDir.equals(lastDataDir)) {
+      if (debug) {
+        log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", next id=" + id,
+            " this is a reopen... nothing else to do.");
+      }
+      
+      versionInfo.reload();
+      
+      // on a normal reopen, we currently shouldn't have to do anything
+      return;
+    }
+    lastDataDir = dataDir;
+    tlogDir = new Path(dataDir, TLOG_NAME);
+    
+    try {
+      if (!fs.exists(tlogDir)) {
+        boolean success = fs.mkdirs(tlogDir);
+        if (!success) {
+          throw new RuntimeException("Could not create directory:" + tlogDir);
+        }
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    
+    tlogFiles = getLogList(fs, tlogDir);
+    id = getLastLogId() + 1; // add 1 since we will create a new log for the
+                             // next update
+    
+    if (debug) {
+      log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", existing tlogs="
+          + Arrays.asList(tlogFiles) + ", next id=" + id);
+    }
+    
+    TransactionLog oldLog = null;
+    for (String oldLogName : tlogFiles) {
+      Path f = new Path(tlogDir, oldLogName);
+      try {
+        oldLog = new HdfsTransactionLog(fs, f, null, true);
+        addOldLog(oldLog, false); // don't remove old logs on startup since more
+                                  // than one may be uncapped.
+      } catch (Exception e) {
+        SolrException.log(log, "Failure to open existing log file (non fatal) "
+            + f, e);
+        try {
+          fs.delete(f, false);
+        } catch (IOException e1) {
+          throw new RuntimeException(e1);
+        }
+      }
+    }
+    
+    // Record first two logs (oldest first) at startup for potential tlog
+    // recovery.
+    // It's possible that at abnormal shutdown both "tlog" and "prevTlog" were
+    // uncapped.
+    for (TransactionLog ll : logs) {
+      newestLogsOnStartup.addFirst(ll);
+      if (newestLogsOnStartup.size() >= 2) break;
+    }
+    
+    try {
+      versionInfo = new VersionInfo(this, 256);
+    } catch (SolrException e) {
+      log.error("Unable to use updateLog: " + e.getMessage(), e);
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
+          "Unable to use updateLog: " + e.getMessage(), e);
+    }
+    
+    // TODO: these startingVersions assume that we successfully recover from all
+    // non-complete tlogs.
+    HdfsUpdateLog.RecentUpdates startingUpdates = getRecentUpdates();
+    try {
+      startingVersions = startingUpdates.getVersions(numRecordsToKeep);
+      startingOperation = startingUpdates.getLatestOperation();
+      
+      // populate recent deletes list (since we can't get that info from the
+      // index)
+      for (int i = startingUpdates.deleteList.size() - 1; i >= 0; i--) {
+        DeleteUpdate du = startingUpdates.deleteList.get(i);
+        oldDeletes.put(new BytesRef(du.id), new LogPtr(-1, du.version));
+      }
+      
+      // populate recent deleteByQuery commands
+      for (int i = startingUpdates.deleteByQueryList.size() - 1; i >= 0; i--) {
+        Update update = startingUpdates.deleteByQueryList.get(i);
+        List<Object> dbq = (List<Object>) update.log.lookup(update.pointer);
+        long version = (Long) dbq.get(1);
+        String q = (String) dbq.get(2);
+        trackDeleteByQuery(q, version);
+      }
+      
+    } finally {
+      startingUpdates.close();
+    }
+    
+  }
+  
+  @Override
+  public String getLogDir() {
+    return tlogDir.toUri().toString();
+  }
+  
+  public static String[] getLogList(FileSystem fs, Path tlogDir) {
+    final String prefix = TLOG_NAME + '.';
+    assert fs != null;
+    FileStatus[] fileStatuses;
+    try {
+      fileStatuses = fs.listStatus(tlogDir, new PathFilter() {
+        
+        @Override
+        public boolean accept(Path path) {
+          return path.getName().startsWith(prefix);
+        }
+      });
+    } catch (IOException e) {
+      // FileNotFoundException is an IOException, so one catch covers both
+      throw new RuntimeException(e);
+    }
+    String[] names = new String[fileStatuses.length];
+    for (int i = 0; i < fileStatuses.length; i++) {
+      names[i] = fileStatuses[i].getPath().getName();
+    }
+    Arrays.sort(names);
+
+    return names;
+  }
+  
+  @Override
+  public void close(boolean committed) {
+    synchronized (this) {
+      super.close(committed);
+      IOUtils.closeQuietly(fs);
+    }
+  }
+  
+  @Override
+  protected void ensureLog() {
+    if (tlog == null) {
+      String newLogName = String.format(Locale.ROOT, LOG_FILENAME_PATTERN,
+          TLOG_NAME, id);
+      tlog = new HdfsTransactionLog(fs, new Path(tlogDir, newLogName),
+          globalStrings);
+    }
+  }
+  
+  /**
+   * Clears the logs on the file system. Only call before init.
+   * 
+   * @param core the SolrCore
+   * @param ulogPluginInfo the init info for the UpdateHandler
+   */
+  @Override
+  public void clearLog(SolrCore core, PluginInfo ulogPluginInfo) {
+    if (ulogPluginInfo == null) return;
+    Path tlogDir = new Path(getTlogDir(core, ulogPluginInfo));
+    try {
+      if (fs.exists(tlogDir)) {
+        String[] files = getLogList(tlogDir);
+        for (String file : files) {
+          Path f = new Path(tlogDir, file);
+          boolean s = fs.delete(f, false);
+          if (!s) {
+            log.error("Could not remove tlog file:" + f);
+          }
+        }
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+  
+  private String[] getLogList(Path tlogDir) throws FileNotFoundException, IOException {
+    final String prefix = TLOG_NAME+'.';
+    FileStatus[] files = fs.listStatus(tlogDir, new PathFilter() {
+      
+      @Override
+      public boolean accept(Path name) {
+        return name.getName().startsWith(prefix);
+      }
+    });
+    List<String> fileList = new ArrayList<String>(files.length);
+    for (FileStatus file : files) {
+      fileList.add(file.getPath().getName());
+    }
+    return fileList.toArray(new String[0]);
+  }
+
+  /**
+   * Returns true if we were able to drop buffered updates and return to the
+   * ACTIVE state
+   */
+  // public boolean dropBufferedUpdates() {
+  // versionInfo.blockUpdates();
+  // try {
+  // if (state != State.BUFFERING) return false;
+  //
+  // if (log.isInfoEnabled()) {
+  // log.info("Dropping buffered updates " + this);
+  // }
+  //
+  // // since we blocked updates, this synchronization shouldn't strictly be
+  // necessary.
+  // synchronized (this) {
+  // if (tlog != null) {
+  // tlog.rollback(recoveryInfo.positionOfStart);
+  // }
+  // }
+  //
+  // state = State.ACTIVE;
+  // operationFlags &= ~FLAG_GAP;
+  // } catch (IOException e) {
+  // SolrException.log(log,"Error attempting to roll back log", e);
+  // return false;
+  // }
+  // finally {
+  // versionInfo.unblockUpdates();
+  // }
+  // return true;
+  // }
+  
+  public String toString() {
+    return "HDFSUpdateLog{state=" + getState() + ", tlog=" + tlog + "}";
+  }
+  
+}
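For reference, a sketch of the tlog naming that ensureLog() relies on,
assuming UpdateLog.LOG_FILENAME_PATTERN is the zero-padded "%s.%019d" form;
the padding makes the lexicographic Arrays.sort() in getLogList() agree with
numeric id order:

    String newLogName = String.format(Locale.ROOT, "%s.%019d", "tlog", 42L);
    // -> "tlog.0000000000000000042"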
diff --git a/solr/core/src/java/org/apache/solr/update/LoggingInfoStream.java b/solr/core/src/java/org/apache/solr/update/LoggingInfoStream.java
new file mode 100644
index 0000000..e710c0a
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/LoggingInfoStream.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update;
+
+import org.apache.lucene.util.InfoStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * An {@link InfoStream} implementation which passes messages on to Solr's logging.
+ */
+public class LoggingInfoStream extends InfoStream {
+  public static final Logger log = LoggerFactory.getLogger(LoggingInfoStream.class);
+
+  @Override
+  public void message(String component, String message) {
+    log.info("[" + component + "][" + Thread.currentThread().getName() + "]: " + message);
+  }
+
+  @Override
+  public boolean isEnabled(String component) {
+    // ignore testpoints so this can be used with tests without flooding logs with verbose messages
+    return !"TP".equals(component) && log.isInfoEnabled();
+  }
+
+  @Override
+  public void close() throws IOException {}
+}
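A minimal wiring sketch (illustrative; analyzer is assumed), mirroring what
SolrIndexConfig.toIndexWriterConfig() does later in this patch when
<infoStream> is enabled:

    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_44, analyzer);
    iwc.setInfoStream(new LoggingInfoStream());
    // IndexWriter diagnostics now land in Solr's log as, e.g.,
    // INFO  [IW][main]: now flush at close   ("TP" test points are filtered out)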
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
index 6bec6d4..766755c 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java
@@ -114,11 +114,11 @@
     
     // make sure any pending deletes are flushed
     flushDeletes(1);
-    
+
     // TODO: this is brittle
     // need to make a clone since these commands may be reused
     AddUpdateCommand clone = new AddUpdateCommand(null);
-    
+
     clone.solrDoc = cmd.solrDoc;
     clone.commitWithin = cmd.commitWithin;
     clone.overwrite = cmd.overwrite;
@@ -135,10 +135,79 @@
       }
       alist.add(addRequest);
     }
-    
+
     flushAdds(maxBufferedAddsPerServer);
   }
-  
+
+  /**
+   * Synchronous (blocking) add to the specified node. Any error returned from the node is propagated.
+   */
+  public void syncAdd(AddUpdateCommand cmd, Node node, ModifiableSolrParams params) throws IOException {
+    log.info("SYNCADD on {} : {}", node, cmd.getPrintableId());
+    checkResponses(false);
+    // flush all pending deletes
+    flushDeletes(1);
+    // flush all pending adds
+    flushAdds(1);
+    // finish with the pending requests
+    checkResponses(false);
+
+    UpdateRequestExt ureq = new UpdateRequestExt();
+    ureq.add(cmd.solrDoc, cmd.commitWithin, cmd.overwrite);
+    ureq.setParams(params);
+    syncRequest(node, ureq);
+  }
+
+  public void syncDelete(DeleteUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params) throws IOException {
+    log.info("SYNCDELETE on {} : ", nodes, cmd);
+    checkResponses(false);
+    // flush all pending adds
+    flushAdds(1);
+    // flush all pending deletes
+    flushDeletes(1);
+    // finish pending requests
+    checkResponses(false);
+
+    DeleteUpdateCommand clonedCmd = clone(cmd);
+    DeleteRequest deleteRequest = new DeleteRequest();
+    deleteRequest.cmd = clonedCmd;
+    deleteRequest.params = params;
+
+    UpdateRequestExt ureq = new UpdateRequestExt();
+    if (cmd.isDeleteById()) {
+      ureq.deleteById(cmd.getId(), cmd.getVersion());
+    } else {
+      ureq.deleteByQuery(cmd.query);
+    }
+    ureq.setParams(params);
+    for (Node node : nodes) {
+      syncRequest(node, ureq);
+    }
+  }
+
+  private void syncRequest(Node node, UpdateRequestExt ureq) {
+    Request sreq = new Request();
+    sreq.node = node;
+    sreq.ureq = ureq;
+
+    String url = node.getUrl();
+    String fullUrl;
+    if (!url.startsWith("http://") && !url.startsWith("https://")) {
+      fullUrl = "http://" + url;
+    } else {
+      fullUrl = url;
+    }
+
+    HttpSolrServer server = new HttpSolrServer(fullUrl,
+        updateShardHandler.getHttpClient());
+
+    try {
+      sreq.ursp = server.request(ureq);
+    } catch (Exception e) {
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed synchronous update on shard " + sreq.node, sreq.exception);
+    }
+  }
+
   public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
       ModifiableSolrParams params) throws IOException {
     
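A hedged usage sketch of the new synchronous path (doc, leaderNode, and
cmdDistributor are placeholders): unlike the buffered distribAdd path,
syncAdd first flushes everything pending and then blocks on the single
request, so any remote failure surfaces immediately as a SolrException:

    AddUpdateCommand cmd = new AddUpdateCommand(null);
    cmd.solrDoc = doc;                               // a SolrInputDocument
    ModifiableSolrParams params = new ModifiableSolrParams();
    cmdDistributor.syncAdd(cmd, leaderNode, params); // throws on any node error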
diff --git a/solr/core/src/java/org/apache/solr/update/SolrCoreState.java b/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
index cad8671..5464d28 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrCoreState.java
@@ -88,6 +88,26 @@
    */
   public abstract void newIndexWriter(SolrCore core, boolean rollback) throws IOException;
   
+  
+  /**
+   * Expert method that closes the IndexWriter - you must call {@link #openIndexWriter(SolrCore)}
+   * in a finally block after calling this method.
+   * 
+   * @param core that the IW belongs to
+   * @param rollback true if IW should rollback rather than close
+   * @throws IOException If there is a low-level I/O error.
+   */
+  public abstract void closeIndexWriter(SolrCore core, boolean rollback) throws IOException;
+  
+  /**
+   * Expert method that opens the IndexWriter - you must call {@link #closeIndexWriter(SolrCore, boolean)}
+   * first, and then call this method in a finally block.
+   * 
+   * @param core that the IW belongs to
+   * @throws IOException If there is a low-level I/O error.
+   */
+  public abstract void openIndexWriter(SolrCore core) throws IOException;
+  
   /**
    * Get the current IndexWriter. If a new IndexWriter must be created, use the
    * settings from the given {@link SolrCore}.
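The intended calling pattern for this pair, per the javadoc above (a sketch;
core and solrCoreState are placeholders):

    solrCoreState.closeIndexWriter(core, true); // rollback and close the IW
    try {
      // ... swap index directories, restore a snapshot, etc. ...
    } finally {
      solrCoreState.openIndexWriter(core);      // always reopen, even on failure
    }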
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java b/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
index 6beb7b4..6c3155c 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
@@ -17,12 +17,15 @@
 
 package org.apache.solr.update;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
 import org.apache.lucene.util.InfoStream;
+import org.apache.lucene.util.PrintStreamInfoStream;
 import org.apache.lucene.util.Version;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.schema.IndexSchema;
@@ -30,6 +33,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
 import java.util.List;
 
 /**
@@ -43,7 +50,14 @@
   public static final String DEFAULT_MERGE_SCHEDULER_CLASSNAME = ConcurrentMergeScheduler.class.getName();
   public final Version luceneVersion;
   
+  /**
+   * The explicit value of &lt;useCompoundFile&gt; specified on this index config
+   * @deprecated use {@link #getUseCompoundFile}
+   */
+  @Deprecated
   public final boolean useCompoundFile;
+  private boolean effectiveUseCompoundFileSetting;
+
   public final int maxBufferedDocs;
   public final int maxMergeDocs;
   public final int maxIndexingThreads;
@@ -59,7 +73,7 @@
   
   public final PluginInfo mergedSegmentWarmerInfo;
   
-  public String infoStreamFile = null;
+  public InfoStream infoStream = InfoStream.NO_OUTPUT;
 
   // Available lock types
   public final static String LOCK_TYPE_SIMPLE = "simple";
@@ -73,7 +87,7 @@
   @SuppressWarnings("deprecation")
   private SolrIndexConfig(SolrConfig solrConfig) {
     luceneVersion = solrConfig.luceneMatchVersion;
-    useCompoundFile = false;
+    useCompoundFile = effectiveUseCompoundFileSetting = false;
     maxBufferedDocs = -1;
     maxMergeDocs = -1;
     maxIndexingThreads = IndexWriterConfig.DEFAULT_MAX_THREAD_STATES;
@@ -121,6 +135,7 @@
 
     defaultMergePolicyClassName = def.defaultMergePolicyClassName;
     useCompoundFile=solrConfig.getBool(prefix+"/useCompoundFile", def.useCompoundFile);
+    effectiveUseCompoundFileSetting = useCompoundFile;
     maxBufferedDocs=solrConfig.getInt(prefix+"/maxBufferedDocs",def.maxBufferedDocs);
     maxMergeDocs=solrConfig.getInt(prefix+"/maxMergeDocs",def.maxMergeDocs);
     maxIndexingThreads=solrConfig.getInt(prefix+"/maxIndexingThreads",def.maxIndexingThreads);
@@ -134,13 +149,17 @@
     mergePolicyInfo = getPluginInfo(prefix + "/mergePolicy", solrConfig, def.mergePolicyInfo);
     
     termIndexInterval = solrConfig.getInt(prefix + "/termIndexInterval", def.termIndexInterval);
-    
+
     boolean infoStreamEnabled = solrConfig.getBool(prefix + "/infoStream", false);
     if(infoStreamEnabled) {
-      infoStreamFile= solrConfig.get(prefix + "/infoStream/@file", null);
-      log.info("IndexWriter infoStream debug log is enabled: " + infoStreamFile);
+      String infoStreamFile = solrConfig.get(prefix + "/infoStream/@file", null);
+      if (infoStreamFile == null) {
+        log.info("IndexWriter infoStream solr logging is enabled");
+        infoStream = new LoggingInfoStream();
+      } else {
+        throw new IllegalArgumentException("Remove @file from <infoStream> to output messages to solr's logfile");
+      }
     }
-    
     mergedSegmentWarmerInfo = getPluginInfo(prefix + "/mergedSegmentWarmer", solrConfig, def.mergedSegmentWarmerInfo);
     if (mergedSegmentWarmerInfo != null && solrConfig.reopenReaders == false) {
       throw new IllegalArgumentException("Supplying a mergedSegmentWarmer will do nothing since reopenReaders is false");
@@ -188,6 +207,11 @@
     iwc.setSimilarity(schema.getSimilarity());
     iwc.setMergePolicy(buildMergePolicy(schema));
     iwc.setMergeScheduler(buildMergeScheduler(schema));
+    iwc.setInfoStream(infoStream);
+
+    // do this after buildMergePolicy since the backcompat logic 
+    // there may modify the effective useCompoundFile
+    iwc.setUseCompoundFile(getUseCompoundFile());
 
     if (maxIndexingThreads != -1) {
       iwc.setMaxThreadStates(maxIndexingThreads);
@@ -199,13 +223,22 @@
                                                                         IndexReaderWarmer.class,
                                                                         null,
                                                                         new Class[] { InfoStream.class },
-                                                                        new Object[] { InfoStream.NO_OUTPUT });
+                                                                        new Object[] { iwc.getInfoStream() });
       iwc.setMergedSegmentWarmer(warmer);
     }
 
     return iwc;
   }
 
+  /**
+   * Builds a MergePolicy, may also modify the value returned by
+   * getUseCompoundFile() for use by the IndexWriterConfig if 
+   * "useCompoundFile" is specified as an init arg for 
+   * an out of the box MergePolicy that no longer supports it
+   *
+   * @see #fixUseCFMergePolicyInitArg
+   * @see #getUseCompoundFile
+   */
   private MergePolicy buildMergePolicy(IndexSchema schema) {
     String mpClassName = mergePolicyInfo == null ? defaultMergePolicyClassName : mergePolicyInfo.className;
 
@@ -213,25 +246,31 @@
 
     if (policy instanceof LogMergePolicy) {
       LogMergePolicy logMergePolicy = (LogMergePolicy) policy;
+      fixUseCFMergePolicyInitArg(LogMergePolicy.class);
 
       if (maxMergeDocs != -1)
         logMergePolicy.setMaxMergeDocs(maxMergeDocs);
 
-      logMergePolicy.setUseCompoundFile(useCompoundFile);
+      logMergePolicy.setNoCFSRatio(getUseCompoundFile() ? 1.0 : 0.0);
 
       if (mergeFactor != -1)
         logMergePolicy.setMergeFactor(mergeFactor);
+
+
     } else if (policy instanceof TieredMergePolicy) {
       TieredMergePolicy tieredMergePolicy = (TieredMergePolicy) policy;
+      fixUseCFMergePolicyInitArg(TieredMergePolicy.class);
       
-      tieredMergePolicy.setUseCompoundFile(useCompoundFile);
+      tieredMergePolicy.setNoCFSRatio(getUseCompoundFile() ? 1.0 : 0.0);
       
       if (mergeFactor != -1) {
         tieredMergePolicy.setMaxMergeAtOnce(mergeFactor);
         tieredMergePolicy.setSegmentsPerTier(mergeFactor);
       }
-    } else {
-      log.warn("Use of compound file format or mergefactor cannot be configured if merge policy is not an instance of LogMergePolicy or TieredMergePolicy. The configured policy's defaults will be used.");
+
+
+    } else if (mergeFactor != -1) {
+      log.warn("Use of <mergeFactor> cannot be configured if merge policy is not an instance of LogMergePolicy or TieredMergePolicy. The configured policy's defaults will be used.");
     }
 
     if (mergePolicyInfo != null)
@@ -244,9 +283,58 @@
     String msClassName = mergeSchedulerInfo == null ? SolrIndexConfig.DEFAULT_MERGE_SCHEDULER_CLASSNAME : mergeSchedulerInfo.className;
     MergeScheduler scheduler = schema.getResourceLoader().newInstance(msClassName, MergeScheduler.class);
 
-    if (mergeSchedulerInfo != null)
-      SolrPluginUtils.invokeSetters(scheduler, mergeSchedulerInfo.initArgs);
+    if (mergeSchedulerInfo != null) {
+      // LUCENE-5080: these two setters are removed, so we have to invoke setMaxMergesAndThreads
+      // if someone has them configured.
+      if (scheduler instanceof ConcurrentMergeScheduler) {
+        NamedList args = mergeSchedulerInfo.initArgs.clone();
+        Integer maxMergeCount = (Integer) args.remove("maxMergeCount");
+        if (maxMergeCount == null) {
+          maxMergeCount = ((ConcurrentMergeScheduler) scheduler).getMaxMergeCount();
+        }
+        Integer maxThreadCount = (Integer) args.remove("maxThreadCount");
+        if (maxThreadCount == null) {
+          maxThreadCount = ((ConcurrentMergeScheduler) scheduler).getMaxThreadCount();
+        }
+        ((ConcurrentMergeScheduler)scheduler).setMaxMergesAndThreads(maxMergeCount, maxThreadCount);
+        SolrPluginUtils.invokeSetters(scheduler, args);
+      } else {
+        SolrPluginUtils.invokeSetters(scheduler, mergeSchedulerInfo.initArgs);
+      }
+    }
 
     return scheduler;
   }
+
+  public boolean getUseCompoundFile() {
+    return effectiveUseCompoundFileSetting;
+  }
+
+  /**
+   * Lucene 4.4 removed the setUseCompoundFile(boolean) method from the two
+   * concrete MergePolicies provided with Lucene/Solr and added it to the
+   * IndexWriterConfig.
+   * In the event that users have a value explicitly configured for this
+   * setter in their MergePolicy init args, we remove it from the MergePolicy
+   * init args, update the 'effective' useCompoundFile setting used by the
+   * IndexWriterConfig, and warn that this init arg should no longer be used.
+   * 
+   * @see #getUseCompoundFile
+   */
+  private void fixUseCFMergePolicyInitArg(Class c) {
+
+    if (null == mergePolicyInfo || null == mergePolicyInfo.initArgs) return;
+
+    Object useCFSArg = mergePolicyInfo.initArgs.remove("useCompoundFile");
+    if (null != useCFSArg) {
+      log.warn("Ignoring 'useCompoundFile' specified as an init arg for the <mergePolicy> since it is no directly longer supported by " + c.getSimpleName());
+      if (useCFSArg instanceof Boolean) {
+        boolean cfs = ((Boolean)useCFSArg).booleanValue();
+        log.warn("Please update your config to specify <useCompoundFile>"+cfs+"</useCompoundFile> directly in your <indexConfig> settings.");
+        effectiveUseCompoundFileSetting = cfs;
+      } else {
+        log.error("MergePolicy's 'useCompoundFile' init arg is not a boolean, can not apply back compat logic to apply to the IndexWriterConfig: " + useCFSArg.toString());
+      }
+    }
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java
index 464bdec..736893d 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java
@@ -77,7 +77,7 @@
     super(directory,
           config.toIndexWriterConfig(schema).
           setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND).
-          setIndexDeletionPolicy(delPolicy).setCodec(codec).setInfoStream(toInfoStream(config))
+          setIndexDeletionPolicy(delPolicy).setCodec(codec)
           );
     log.debug("Opened Writer " + name);
     this.name = name;
@@ -88,20 +88,6 @@
     this.directoryFactory = factory;
   }
 
-  private static InfoStream toInfoStream(SolrIndexConfig config) throws IOException {
-    String infoStreamFile = config.infoStreamFile;
-    if (infoStreamFile != null) {
-      File f = new File(infoStreamFile);
-      File parent = f.getParentFile();
-      if (parent != null) parent.mkdirs();
-      FileOutputStream fos = new FileOutputStream(f, true);
-      return new PrintStreamInfoStream(new PrintStream(fos, true, "UTF-8"));
-    } else {
-      return InfoStream.NO_OUTPUT;
-    }
-  }
-
-
   /**
    * use DocumentBuilder now...
    * private final void addField(Document doc, String name, String val) {
@@ -164,11 +150,8 @@
       if (infoStream != null) {
         infoStream.close();
       }
-      
       isClosed = true;
-      
       directoryFactory.release(directory);
-      
       numCloses.incrementAndGet();
     }
   }
diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
index 1133d2f..31f1a34 100644
--- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java
+++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java
@@ -17,15 +17,6 @@
 
 package org.apache.solr.update;
 
-import org.apache.lucene.util.BytesRef;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.util.FastInputStream;
-import org.apache.solr.common.util.FastOutputStream;
-import org.apache.solr.common.util.JavaBinCodec;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -34,15 +25,23 @@
 import java.nio.channels.Channels;
 import java.nio.channels.FileChannel;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.DataInputInputStream;
+import org.apache.solr.common.util.FastInputStream;
+import org.apache.solr.common.util.FastOutputStream;
+import org.apache.solr.common.util.JavaBinCodec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  *  Log Format: List{Operation, Version, ...}
  *  ADD, VERSION, DOC
@@ -75,7 +74,7 @@
   FastOutputStream fos;    // all accesses to this stream should be synchronized on "this" (The TransactionLog)
   int numRecords;
   
-  volatile boolean deleteOnClose = true;  // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery)
+  protected volatile boolean deleteOnClose = true;  // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery)
 
   AtomicInteger refcount = new AtomicInteger(1);
   Map<String,Integer> globalStringMap = new HashMap<String, Integer>();
@@ -98,7 +97,7 @@
   };
 
   public class LogCodec extends JavaBinCodec {
-    public LogCodec() {
+    public LogCodec(JavaBinCodec.ObjectResolver resolver) {
       super(resolver);
     }
 
@@ -121,7 +120,7 @@
     }
 
     @Override
-    public String readExternString(FastInputStream fis) throws IOException {
+    public String readExternString(DataInputInputStream fis) throws IOException {
       int idx = readSize(fis);
       if (idx != 0) {// idx != 0 is the index of the extern string
       // no need to synchronize globalStringList - it's only updated before the first record is written to the log
@@ -191,6 +190,9 @@
     }
   }
 
+  // for subclasses
+  protected TransactionLog() {}
+
   /** Returns the number of records in the log (currently includes the header and an optional commit).
    * Note: currently returns 0 for reopened existing log files.
    */
@@ -245,7 +247,7 @@
 
 
   public long writeData(Object o) {
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
     try {
       long pos = fos.size();   // if we had flushed, this should be equal to channel.position()
       codec.init(fos);
@@ -260,7 +262,7 @@
   private void readHeader(FastInputStream fis) throws IOException {
     // read existing header
     fis = fis != null ? fis : new ChannelFastInputStream(channel, 0);
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
     Map header = (Map)codec.unmarshal(fis);
 
     fis.readInt(); // skip size
@@ -276,7 +278,7 @@
     }
   }
 
-  private void addGlobalStrings(Collection<String> strings) {
+  protected void addGlobalStrings(Collection<String> strings) {
     if (strings == null) return;
     int origSize = globalStringMap.size();
     for (String s : strings) {
@@ -297,7 +299,7 @@
     }
   }
 
-  private void writeLogHeader(LogCodec codec) throws IOException {
+  protected void writeLogHeader(LogCodec codec) throws IOException {
     long pos = fos.size();
     assert pos == 0;
 
@@ -309,7 +311,7 @@
     endRecord(pos);
   }
 
-  private void endRecord(long startRecordPosition) throws IOException {
+  protected void endRecord(long startRecordPosition) throws IOException {
     fos.writeInt((int)(fos.size() - startRecordPosition));
     numRecords++;
   }
@@ -333,7 +335,7 @@
   int lastAddSize;
 
   public long write(AddUpdateCommand cmd, int flags) {
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
     SolrInputDocument sdoc = cmd.getSolrInputDocument();
 
     try {
@@ -375,7 +377,7 @@
   }
 
   public long writeDelete(DeleteUpdateCommand cmd, int flags) {
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
 
     try {
       checkWriteHeader(codec, null);
@@ -405,7 +407,7 @@
   }
 
   public long writeDeleteByQuery(DeleteUpdateCommand cmd, int flags) {
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
     try {
       checkWriteHeader(codec, null);
 
@@ -431,7 +433,7 @@
 
 
   public long writeCommit(CommitUpdateCommand cmd, int flags) {
-    LogCodec codec = new LogCodec();
+    LogCodec codec = new LogCodec(resolver);
     synchronized (this) {
       try {
         long pos = fos.size();   // if we had flushed, this should be equal to channel.position()
@@ -479,7 +481,7 @@
       }
 
       ChannelFastInputStream fis = new ChannelFastInputStream(channel, pos);
-      LogCodec codec = new LogCodec();
+      LogCodec codec = new LogCodec(resolver);
       return codec.readVal(fis);
     } catch (IOException e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
@@ -529,7 +531,7 @@
     }
   }
 
-  private void close() {
+  protected void close() {
     try {
       if (debug) {
        log.debug("Closing tlog " + this);
@@ -570,19 +572,22 @@
 
   /** Returns a single threaded reverse reader */
   public ReverseReader getReverseReader() throws IOException {
-    return new ReverseReader();
+    return new FSReverseReader();
   }
 
 
   public class LogReader {
-    ChannelFastInputStream fis;
-    private LogCodec codec = new LogCodec();
+    private ChannelFastInputStream fis;
+    private LogCodec codec = new LogCodec(resolver);
 
     public LogReader(long startingPos) {
       incref();
       fis = new ChannelFastInputStream(channel, startingPos);
     }
 
+    // for subclasses
+    protected LogReader() {}
+
     /** Returns the next object from the log, or null if none available.
      *
      * @return The log record, or null if EOF
@@ -638,11 +643,32 @@
 
   }
 
-  public class ReverseReader {
+  public abstract class ReverseReader {
+
+    /** Returns the next object from the log, or null if none available.
+     *
+     * @return The log record, or null if EOF
+     * @throws IOException If there is a low-level I/O error.
+     */
+    public abstract Object next() throws IOException;
+
+    /* Returns the position in the log file of the last record returned by next(). */
+    public abstract long position();
+    public abstract void close();
+
+    @Override
+    public abstract String toString();
+  }
+  
+  public class FSReverseReader extends ReverseReader {
     ChannelFastInputStream fis;
-    private LogCodec codec = new LogCodec() {
+    private LogCodec codec = new LogCodec(resolver) {
       @Override
-      public SolrInputDocument readSolrInputDocument(FastInputStream dis) {
+      public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) {
         // Given that the SolrInputDocument is last in an add record, it's OK to just skip
         // reading it completely.
         return null;
@@ -652,7 +678,7 @@
     int nextLength;  // length of the next record (the next one closer to the start of the log file)
     long prevPos;    // where we started reading from last time (so prevPos - nextLength == start of next record)
 
-    public ReverseReader() throws IOException {
+    public FSReverseReader() throws IOException {
       incref();
 
       long sz;
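
The hunk above splits ReverseReader into an abstract contract plus the channel-backed FSReverseReader, so a storage-specific reader (such as the HDFS-backed log introduced elsewhere in this change) can be returned from getReverseReader() without touching callers. A minimal standalone sketch of the pattern, with hypothetical names that are not Solr classes:

    import java.io.IOException;

    // Illustrative stand-ins for the abstract/concrete reader split above.
    abstract class AbstractReverseReader {
      /** Returns the next record, or null at EOF. */
      public abstract Object next() throws IOException;
      /** Position in the log of the last record returned by next(). */
      public abstract long position();
      public abstract void close() throws IOException;
    }

    class FileReverseReader extends AbstractReverseReader {
      private long prevPos;

      FileReverseReader(long startPos) { this.prevPos = startPos; }

      @Override
      public Object next() throws IOException {
        // A real implementation reads the trailing length int written by
        // endRecord() and steps backwards through the channel; elided here.
        return null;
      }

      @Override
      public long position() { return prevPos; }

      @Override
      public void close() {}
    }
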
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
index e79b15f..03c5a48 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
@@ -18,10 +18,11 @@
 package org.apache.solr.update;
 
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Vector;
 
+import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.HdfsDirectoryFactory;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.core.SolrEventListener;
@@ -52,7 +53,7 @@
   protected Vector<SolrEventListener> softCommitCallbacks = new Vector<SolrEventListener>();
   protected Vector<SolrEventListener> optimizeCallbacks = new Vector<SolrEventListener>();
 
-  protected volatile UpdateLog ulog;
+  protected final UpdateLog ulog;
 
   private void parseEventListeners() {
     final Class<SolrEventListener> clazz = SolrEventListener.class;
@@ -71,34 +72,6 @@
     }
   }
 
-
-  private void initLog(PluginInfo ulogPluginInfo) {
-    if (ulogPluginInfo != null && ulogPluginInfo.isEnabled()) {
-      ulog = new UpdateLog();
-      ulog.init(ulogPluginInfo);
-      // ulog = core.createInitInstance(ulogPluginInfo, UpdateLog.class, "update log", "solr.NullUpdateLog");
-      ulog.init(this, core);
-    }
-  }
-
-  // not thread safe - for startup
-  private void clearLog(PluginInfo ulogPluginInfo) {
-    if (ulogPluginInfo == null) return;
-    File tlogDir = UpdateLog.getTlogDir(core, ulogPluginInfo);
-    log.info("Clearing tlog files, tlogDir=" + tlogDir);
-    if (tlogDir.exists()) {
-      String[] files = UpdateLog.getLogList(tlogDir);
-      for (String file : files) {
-        File f = new File(tlogDir, file);
-        boolean s = f.delete();
-        if (!s) {
-          log.error("Could not remove tlog file:" + f.getAbsolutePath());
-          //throw new SolrException(ErrorCode.SERVER_ERROR, "Could not remove tlog file:" + f.getAbsolutePath());
-        }
-      }
-    }
-  }
-
   protected void callPostCommitCallbacks() {
     for (SolrEventListener listener : commitCallbacks) {
       listener.postCommit();
@@ -127,14 +100,43 @@
     idFieldType = idField!=null ? idField.getType() : null;
     parseEventListeners();
     PluginInfo ulogPluginInfo = core.getSolrConfig().getPluginInfo(UpdateLog.class.getName());
-    if (!core.isReloaded() && !core.getDirectoryFactory().isPersistent()) {
-      clearLog(ulogPluginInfo);
-    }
-    if (updateLog == null) {
-      initLog(ulogPluginInfo);
+    
+
+    if (updateLog == null && ulogPluginInfo != null && ulogPluginInfo.isEnabled()) {
+      String dataDir = (String)ulogPluginInfo.initArgs.get("dir");
+      
+      String ulogDir = core.getCoreDescriptor().getUlogDir();
+      if (ulogDir != null) {
+        dataDir = ulogDir;
+      }
+      if (dataDir == null || dataDir.length()==0) {
+        dataDir = core.getDataDir();
+      }
+           
+      if (dataDir != null && dataDir.startsWith("hdfs:/")) {
+        DirectoryFactory dirFactory = core.getDirectoryFactory();
+        if (dirFactory instanceof HdfsDirectoryFactory) {
+          ulog = new HdfsUpdateLog(((HdfsDirectoryFactory)dirFactory).getConfDir());
+        } else {
+          ulog = new HdfsUpdateLog();
+        }
+        
+      } else {
+        ulog = new UpdateLog();
+      }
+      
+      if (!core.isReloaded() && !core.getDirectoryFactory().isPersistent()) {
+        ulog.clearLog(core, ulogPluginInfo);
+      }
+      
+      ulog.init(ulogPluginInfo);
+
+      ulog.init(this, core);
     } else {
-      this.ulog = updateLog;
+      ulog = updateLog;
     }
+    // ulog.init() when reusing an existing log is deferred (currently at the end of the DUH2 constructor)
+
   }
 
   /**
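
The rewritten initialization above resolves the update-log directory from three sources in priority order (the <updateLog> "dir" init arg, the CoreDescriptor's ulogDir, then the core's data dir) and then chooses the log implementation by URL scheme. A condensed sketch of that decision, with plain strings standing in for the Solr objects:

    // Condensed sketch of the resolution and selection logic above.
    class UlogSelection {
      static String resolveUlogDir(String initArgDir, String coreUlogDir, String coreDataDir) {
        String dataDir = initArgDir;
        if (coreUlogDir != null) {
          dataDir = coreUlogDir;            // CoreDescriptor ulogDir overrides the init arg
        }
        if (dataDir == null || dataDir.length() == 0) {
          dataDir = coreDataDir;            // fall back to the core's data dir
        }
        return dataDir;
      }

      static boolean useHdfsUpdateLog(String dataDir) {
        // The handler picks HdfsUpdateLog purely by URL scheme.
        return dataDir != null && dataDir.startsWith("hdfs:/");
      }
    }
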
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index 8ab3a07..be01db5 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -17,15 +17,38 @@
 
 package org.apache.solr.update;
 
+import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase.FROMLEADER;
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.params.UpdateParams;
 import org.apache.solr.common.util.ExecutorUtil;
-import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.LocalSolrQueryRequest;
@@ -34,9 +57,6 @@
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.update.processor.DistributedUpdateProcessor;
-import org.apache.solr.update.processor.DistributedUpdateProcessorFactory;
-import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
-import org.apache.solr.update.processor.RunUpdateProcessorFactory;
 import org.apache.solr.update.processor.UpdateRequestProcessor;
 import org.apache.solr.update.processor.UpdateRequestProcessorChain;
 import org.apache.solr.util.DefaultSolrThreadFactory;
@@ -45,15 +65,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.*;
-
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase.FROMLEADER;
-
 
 /** @lucene.experimental */
 public class UpdateLog implements PluginInfoInitialized {
@@ -64,6 +75,10 @@
   public boolean debug = log.isDebugEnabled();
   public boolean trace = log.isTraceEnabled();
 
+  // TODO: hack
+  public FileSystem getFs() {
+    return null;
+  }
 
   public enum SyncLevel { NONE, FLUSH, FSYNC;
     public static SyncLevel getSyncLevel(String level){
@@ -108,27 +123,27 @@
   }
 
   long id = -1;
-  private State state = State.ACTIVE;
-  private int operationFlags;  // flags to write in the transaction log with operations (i.e. FLAG_GAP)
+  protected State state = State.ACTIVE;
+  protected int operationFlags;  // flags to write in the transaction log with operations (i.e. FLAG_GAP)
 
-  private TransactionLog tlog;
-  private TransactionLog prevTlog;
-  private Deque<TransactionLog> logs = new LinkedList<TransactionLog>();  // list of recent logs, newest first
-  private LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<TransactionLog>();
-  private int numOldRecords;  // number of records in the recent logs
+  protected TransactionLog tlog;
+  protected TransactionLog prevTlog;
+  protected Deque<TransactionLog> logs = new LinkedList<TransactionLog>();  // list of recent logs, newest first
+  protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<TransactionLog>();
+  protected int numOldRecords;  // number of records in the recent logs
 
-  private Map<BytesRef,LogPtr> map = new HashMap<BytesRef, LogPtr>();
-  private Map<BytesRef,LogPtr> prevMap;  // used while committing/reopening is happening
-  private Map<BytesRef,LogPtr> prevMap2;  // used while committing/reopening is happening
-  private TransactionLog prevMapLog;  // the transaction log used to look up entries found in prevMap
-  private TransactionLog prevMapLog2;  // the transaction log used to look up entries found in prevMap
+  protected Map<BytesRef,LogPtr> map = new HashMap<BytesRef, LogPtr>();
+  protected Map<BytesRef,LogPtr> prevMap;  // used while committing/reopening is happening
+  protected Map<BytesRef,LogPtr> prevMap2;  // used while committing/reopening is happening
+  protected TransactionLog prevMapLog;  // the transaction log used to look up entries found in prevMap
+  protected TransactionLog prevMapLog2;  // the transaction log used to look up entries found in prevMap
 
-  private final int numDeletesToKeep = 1000;
-  private final int numDeletesByQueryToKeep = 100;
+  protected final int numDeletesToKeep = 1000;
+  protected final int numDeletesByQueryToKeep = 100;
   public final int numRecordsToKeep = 100;
 
   // keep track of deletes only... this is not updated on an add
-  private LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<BytesRef, LogPtr>(numDeletesToKeep) {
+  protected LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<BytesRef, LogPtr>(numDeletesToKeep) {
     @Override
     protected boolean removeEldestEntry(Map.Entry eldest) {
       return size() > numDeletesToKeep;
@@ -145,21 +160,21 @@
     }
   }
 
-  private LinkedList<DBQ> deleteByQueries = new LinkedList<DBQ>();
+  protected LinkedList<DBQ> deleteByQueries = new LinkedList<DBQ>();
 
-  private String[] tlogFiles;
-  private File tlogDir;
-  private Collection<String> globalStrings;
+  protected String[] tlogFiles;
+  protected File tlogDir;
+  protected Collection<String> globalStrings;
 
-  private String dataDir;
-  private String lastDataDir;
+  protected String dataDir;
+  protected String lastDataDir;
 
-  private VersionInfo versionInfo;
+  protected VersionInfo versionInfo;
 
-  private SyncLevel defaultSyncLevel = SyncLevel.FLUSH;
+  protected SyncLevel defaultSyncLevel = SyncLevel.FLUSH;
 
   volatile UpdateHandler uhandler;    // a core reload can change this reference!
-  private volatile boolean cancelApplyBufferUpdate;
+  protected volatile boolean cancelApplyBufferUpdate;
   List<Long> startingVersions;
   int startingOperation;  // last operation in the logs on startup
 
@@ -189,13 +204,17 @@
     defaultSyncLevel = SyncLevel.getSyncLevel((String)info.initArgs.get("syncLevel"));
   }
 
+  /* Note, when this is called, uhandler is not completely constructed.
+   * This must be called when a new log is created, or
+   * for an existing log whenever the core or update handler changes.
+   */
   public void init(UpdateHandler uhandler, SolrCore core) {
     // ulogDir from CoreDescriptor overrides
     String ulogDir = core.getCoreDescriptor().getUlogDir();
     if (ulogDir != null) {
       dataDir = ulogDir;
     }
-    
+
     if (dataDir == null || dataDir.length()==0) {
       dataDir = core.getDataDir();
     }
@@ -276,8 +295,8 @@
 
   }
   
-  public File getLogDir() {
-    return tlogDir;
+  public String getLogDir() {
+    return tlogDir.getAbsolutePath();
   }
   
   public List<Long> getStartingVersions() {
@@ -291,7 +310,7 @@
   /* Takes over ownership of the log, keeping it until no longer needed
     and then decrementing its reference and dropping it.
    */
-  private void addOldLog(TransactionLog oldLog, boolean removeOld) {
+  protected void addOldLog(TransactionLog oldLog, boolean removeOld) {
     if (oldLog == null) return;
 
     numOldRecords += oldLog.numRecords();
@@ -322,7 +341,7 @@
   }
 
 
-  public static String[] getLogList(File directory) {
+  public String[] getLogList(File directory) {
     final String prefix = TLOG_NAME+'.';
     String[] names = directory.list(new FilenameFilter() {
       @Override
@@ -330,6 +349,9 @@
         return name.startsWith(prefix);
       }
     });
+    if (names == null) {
+      throw new RuntimeException(new FileNotFoundException(directory.getAbsolutePath()));
+    }
     Arrays.sort(names);
     return names;
   }
@@ -540,7 +562,7 @@
     }
   }
 
-  private void newMap() {
+  protected void newMap() {
     prevMap2 = prevMap;
     prevMapLog2 = prevMapLog;
 
@@ -793,7 +815,7 @@
   }
 
 
-  private void ensureLog() {
+  protected void ensureLog() {
     if (tlog == null) {
       String newLogName = String.format(Locale.ROOT, LOG_FILENAME_PATTERN, TLOG_NAME, id);
       tlog = new TransactionLog(new File(tlogDir, newLogName), globalStrings);
@@ -968,6 +990,8 @@
               log.warn("Exception reverse reading log", ex);
               break;
             }
+
+            numUpdates++;
           }
 
         } catch (IOException e) {
@@ -1139,7 +1163,7 @@
 
 
 
-  private RecoveryInfo recoveryInfo;
+  protected RecoveryInfo recoveryInfo;
 
   class LogReplayer implements Runnable {
     private Logger loglog = log;  // set to something different?
@@ -1416,7 +1440,7 @@
     }
   }
   
-  public static File getTlogDir(SolrCore core, PluginInfo info) {
+  protected String getTlogDir(SolrCore core, PluginInfo info) {
     String dataDir = (String) info.initArgs.get("dir");
     
     String ulogDir = core.getCoreDescriptor().getUlogDir();
@@ -1427,11 +1451,30 @@
     if (dataDir == null || dataDir.length() == 0) {
       dataDir = core.getDataDir();
     }
-    
-    return new File(dataDir, TLOG_NAME);
+
+    return dataDir + "/" + TLOG_NAME;
+  }
+  
+  /**
+   * Clears the logs on the file system. Only call before init.
+   * 
+   * @param core the SolrCore
+   * @param ulogPluginInfo the init info for the UpdateHandler
+   */
+  public void clearLog(SolrCore core, PluginInfo ulogPluginInfo) {
+    if (ulogPluginInfo == null) return;
+    File tlogDir = new File(getTlogDir(core, ulogPluginInfo));
+    if (tlogDir.exists()) {
+      String[] files = getLogList(tlogDir);
+      for (String file : files) {
+        File f = new File(tlogDir, file);
+        boolean s = f.delete();
+        if (!s) {
+          log.error("Could not remove tlog file: " + f);
+        }
+      }
+    }
   }
   
 }
 
-
-
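
Two behavioral changes above are easy to miss: getLogList() is no longer static, so a subclass can override it for other storage, and it now fails fast when File.list() returns null (a missing or unreadable directory) instead of handing null to Arrays.sort(). A self-contained sketch of the null-check pattern; the lambda is used here only for brevity:

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.util.Arrays;

    class TlogListing {
      static final String PREFIX = "tlog.";

      // File.list() returns null for a missing or unreadable directory;
      // surface that immediately rather than as a later NullPointerException.
      static String[] listLogs(File dir) {
        String[] names = dir.list((d, name) -> name.startsWith(PREFIX));
        if (names == null) {
          throw new RuntimeException(new FileNotFoundException(dir.getAbsolutePath()));
        }
        Arrays.sort(names);
        return names;
      }
    }
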
diff --git a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
new file mode 100644
index 0000000..ff88ca4
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java
@@ -0,0 +1,349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.SolrInputField;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.ManagedIndexSchema;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.update.AddUpdateCommand;
+import org.apache.solr.update.processor.FieldMutatingUpdateProcessorFactory.SelectorParams;
+import org.apache.solr.update.processor.FieldMutatingUpdateProcessor.FieldNameSelector;
+import org.apache.solr.util.plugin.SolrCoreAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
+import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
+
+
+/**
+ * <p>
+ * This processor will dynamically add fields to the schema if an input document contains
+ * one or more fields that don't match any field or dynamic field in the schema.
+ * </p>
+ * <p>
+ * By default, this processor selects all fields that don't match a schema field or
+ * dynamic field.  The "fieldName" and "fieldRegex" selectors may be specified to further
+ * restrict the selected fields, but the other selectors ("typeName", "typeClass", and
+ * "fieldNameMatchesSchemaField") may not be specified.
+ * </p>
+ * <p>
+ * This processor is configured to map from each field's values' class(es) to the schema
+ * field type that will be used when adding the new field to the schema.  All new fields
+ * are then added to the schema in a single batch.  If schema addition fails for any
+ * field, addition is re-attempted only for those that don't match any schema
+ * field.  This process is repeated, either until all new fields are successfully added,
+ * or until there are no new fields (presumably because the fields that were new when
+ * this processor started its work were subsequently added by a different update
+ * request, possibly on a different node).
+ * </p>
+ * <p>
+ * This processor takes as configuration a sequence of zero or more "typeMapping"-s from
+ * one or more "valueClass"-s, specified as either an &lt;arr&gt; of &lt;str&gt;, or
+ * multiple &lt;str&gt; with the same name, to an existing schema "fieldType".
+ * </p>
+ * <p>
+ * If more than one "valueClass" is specified in a "typeMapping", field values with any
+ * of the specified "valueClass"-s will be mapped to the specified target "fieldType".
+ * The "typeMapping"-s are attempted in the specified order; if a field value's class
+ * is not specified in a "valueClass", the next "typeMapping" is attempted. If no
+ * "typeMapping" succeeds, then the specified "defaultFieldType" is used. 
+ * </p>
+ * <p>
+ * Example configuration:
+ * </p>
+ * 
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.AddSchemaFieldsUpdateProcessorFactory"&gt;
+ *   &lt;str name="defaultFieldType"&gt;text_general&lt;/str&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;str name="valueClass"&gt;Boolean&lt;/str&gt;
+ *     &lt;str name="fieldType"&gt;boolean&lt;/str&gt;
+ *   &lt;/lst&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;str name="valueClass"&gt;Integer&lt;/str&gt;
+ *     &lt;str name="fieldType"&gt;tint&lt;/str&gt;
+ *   &lt;/lst&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;str name="valueClass"&gt;Float&lt;/str&gt;
+ *     &lt;str name="fieldType"&gt;tfloat&lt;/str&gt;
+ *   &lt;/lst&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;str name="valueClass"&gt;Date&lt;/str&gt;
+ *     &lt;str name="fieldType"&gt;tdate&lt;/str&gt;
+ *   &lt;/lst&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;str name="valueClass"&gt;Long&lt;/str&gt;
+ *     &lt;str name="valueClass"&gt;Integer&lt;/str&gt;
+ *     &lt;str name="fieldType"&gt;tlong&lt;/str&gt;
+ *   &lt;/lst&gt;
+ *   &lt;lst name="typeMapping"&gt;
+ *     &lt;arr name="valueClass"&gt;
+ *       &lt;str&gt;Double&lt;/str&gt;
+ *       &lt;str&gt;Float&lt;/str&gt;
+ *     &lt;/arr&gt;
+ *     &lt;str name="fieldType"&gt;tdouble&lt;/str&gt;
+ *   &lt;/lst&gt;
+ * &lt;/processor&gt;</pre>
+ */
+public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcessorFactory implements SolrCoreAware {
+  public final static Logger log = LoggerFactory.getLogger(AddSchemaFieldsUpdateProcessorFactory.class);
+
+  private static final String TYPE_MAPPING_PARAM = "typeMapping";
+  private static final String VALUE_CLASS_PARAM = "valueClass";
+  private static final String FIELD_TYPE_PARAM = "fieldType";
+  private static final String DEFAULT_FIELD_TYPE_PARAM = "defaultFieldType";
+  
+  private List<TypeMapping> typeMappings = Collections.emptyList();
+  private SelectorParams inclusions = new SelectorParams();
+  private Collection<SelectorParams> exclusions = new ArrayList<SelectorParams>();
+  private FieldNameSelector selector = null;
+  private String defaultFieldType;
+
+  protected final FieldMutatingUpdateProcessor.FieldNameSelector getSelector() {
+    if (null != selector) return selector;
+    throw new SolrException(SERVER_ERROR, "selector was never initialized, inform(SolrCore) never called???");
+  }
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req, 
+                                            SolrQueryResponse rsp, 
+                                            UpdateRequestProcessor next) {
+    return new AddSchemaFieldsUpdateProcessor(next);
+  }
+
+  @Override
+  public void init(NamedList args) {
+    inclusions = FieldMutatingUpdateProcessorFactory.parseSelectorParams(args);
+    validateSelectorParams(inclusions);
+    inclusions.fieldNameMatchesSchemaField = false;  // Explicitly (non-configurably) require unknown field names
+    exclusions = FieldMutatingUpdateProcessorFactory.parseSelectorExclusionParams(args);
+    for (SelectorParams exclusion : exclusions) {
+      validateSelectorParams(exclusion);
+    }
+    Object defaultFieldTypeParam = args.remove(DEFAULT_FIELD_TYPE_PARAM);
+    if (null == defaultFieldTypeParam) {
+      throw new SolrException(SERVER_ERROR, "Missing required init param '" + DEFAULT_FIELD_TYPE_PARAM + "'");
+    } else {
+      if ( ! (defaultFieldTypeParam instanceof CharSequence)) {
+        throw new SolrException(SERVER_ERROR, "Init param '" + DEFAULT_FIELD_TYPE_PARAM + "' must be a <str>");
+      }
+    }
+    defaultFieldType = defaultFieldTypeParam.toString();
+
+    typeMappings = parseTypeMappings(args);
+
+    super.init(args);
+  }
+
+  @Override
+  public void inform(SolrCore core) {
+    selector = FieldMutatingUpdateProcessor.createFieldNameSelector
+        (core.getResourceLoader(), core, inclusions, getDefaultSelector(core));
+
+    for (SelectorParams exc : exclusions) {
+      selector = FieldMutatingUpdateProcessor.wrap(selector, FieldMutatingUpdateProcessor.createFieldNameSelector
+          (core.getResourceLoader(), core, exc, FieldMutatingUpdateProcessor.SELECT_NO_FIELDS));
+    }
+
+    for (TypeMapping typeMapping : typeMappings) {
+      typeMapping.populateValueClasses(core);
+    }
+  }
+
+  private FieldNameSelector getDefaultSelector(final SolrCore core) {
+    return new FieldNameSelector() {
+      @Override
+      public boolean shouldMutate(final String fieldName) {
+        return null == core.getLatestSchema().getFieldTypeNoEx(fieldName);
+      }
+    };
+  }
+
+  private static List<TypeMapping> parseTypeMappings(NamedList args) {
+    List<TypeMapping> typeMappings = new ArrayList<TypeMapping>();
+    List<Object> typeMappingsParams = args.getAll(TYPE_MAPPING_PARAM);
+    for (Object typeMappingObj : typeMappingsParams) {
+      if (null == typeMappingObj) {
+        throw new SolrException(SERVER_ERROR, "'" + TYPE_MAPPING_PARAM + "' init param cannot be null");
+      }
+      if ( ! (typeMappingObj instanceof NamedList) ) {
+        throw new SolrException(SERVER_ERROR, "'" + TYPE_MAPPING_PARAM + "' init param must be a <lst>");
+      }
+      NamedList typeMappingNamedList = (NamedList)typeMappingObj;
+
+      Object fieldTypeObj = typeMappingNamedList.remove(FIELD_TYPE_PARAM);
+      if (null == fieldTypeObj) {
+        throw new SolrException(SERVER_ERROR,
+            "Each '" + TYPE_MAPPING_PARAM + "' <lst/> must contain a '" + FIELD_TYPE_PARAM + "' <str>");
+      }
+      if ( ! (fieldTypeObj instanceof CharSequence)) {
+        throw new SolrException(SERVER_ERROR, "'" + FIELD_TYPE_PARAM + "' init param must be a <str>");
+      }
+      if (null != typeMappingNamedList.get(FIELD_TYPE_PARAM)) {
+        throw new SolrException(SERVER_ERROR,
+            "Each '" + TYPE_MAPPING_PARAM + "' <lst/> must contain a '" + FIELD_TYPE_PARAM + "' <str>");
+      }
+      String fieldType = fieldTypeObj.toString();
+
+      Collection<String> valueClasses
+          = FieldMutatingUpdateProcessorFactory.oneOrMany(typeMappingNamedList, VALUE_CLASS_PARAM);
+      if (valueClasses.isEmpty()) {
+        throw new SolrException(SERVER_ERROR, 
+            "Each '" + TYPE_MAPPING_PARAM + "' <lst/> must contain at least one '" + VALUE_CLASS_PARAM + "' <str>");
+      }
+      typeMappings.add(new TypeMapping(fieldType, valueClasses));
+
+      if (0 != typeMappingNamedList.size()) {
+        throw new SolrException(SERVER_ERROR, 
+            "Unexpected '" + TYPE_MAPPING_PARAM + "' init sub-param(s): '" + typeMappingNamedList.toString() + "'");
+      }
+      args.remove(TYPE_MAPPING_PARAM);
+    }
+    return typeMappings;
+  }
+
+  private void validateSelectorParams(SelectorParams params) {
+    if ( ! params.typeName.isEmpty()) {
+      throw new SolrException(SERVER_ERROR, "'typeName' init param is not allowed in this processor");
+    }
+    if ( ! params.typeClass.isEmpty()) {
+      throw new SolrException(SERVER_ERROR, "'typeClass' init param is not allowed in this processor");
+    }
+    if (null != params.fieldNameMatchesSchemaField) {
+      throw new SolrException(SERVER_ERROR, "'fieldNameMatchesSchemaField' init param is not allowed in this processor");
+    }
+  }
+
+  private static class TypeMapping {
+    public String fieldTypeName;
+    public Collection<String> valueClassNames;
+    public Set<Class<?>> valueClasses;
+
+    public TypeMapping(String fieldTypeName, Collection<String> valueClassNames) {
+      this.fieldTypeName = fieldTypeName;
+      this.valueClassNames = valueClassNames;
+      // this.valueClasses population is delayed until the schema is available
+    }
+
+    public void populateValueClasses(SolrCore core) {
+      IndexSchema schema = core.getLatestSchema();
+      ClassLoader loader = core.getResourceLoader().getClassLoader();
+      if (null == schema.getFieldTypeByName(fieldTypeName)) {
+        throw new SolrException(SERVER_ERROR, "fieldType '" + fieldTypeName + "' not found in the schema");
+      }
+      valueClasses = new HashSet<Class<?>>();
+      for (String valueClassName : valueClassNames) {
+        try {
+          valueClasses.add(loader.loadClass(valueClassName));
+        } catch (ClassNotFoundException e) {
+          throw new SolrException(SERVER_ERROR,
+              "valueClass '" + valueClassName + "' not found for fieldType '" + fieldTypeName + "'");
+        }
+      }
+    }
+  }
+
+  private class AddSchemaFieldsUpdateProcessor extends UpdateRequestProcessor {
+    public AddSchemaFieldsUpdateProcessor(UpdateRequestProcessor next) {
+      super(next);
+    }
+    
+    @Override
+    public void processAdd(AddUpdateCommand cmd) throws IOException {
+      if ( ! cmd.getReq().getCore().getLatestSchema().isMutable()) {
+        final String message = "This IndexSchema is not mutable.";
+        throw new SolrException(BAD_REQUEST, message);
+      }
+      final SolrInputDocument doc = cmd.getSolrInputDocument();
+      final SolrCore core = cmd.getReq().getCore();
+      for (;;) {
+        final IndexSchema oldSchema = core.getLatestSchema();
+        List<SchemaField> newFields = new ArrayList<SchemaField>();
+        for (final String fieldName : doc.getFieldNames()) {
+          if (selector.shouldMutate(fieldName)) {
+            String fieldTypeName = mapValueClassesToFieldType(doc.getField(fieldName));
+            newFields.add(oldSchema.newField(fieldName, fieldTypeName, Collections.<String,Object>emptyMap()));
+          }
+        }
+        if (newFields.isEmpty()) {
+          // nothing to do - no fields will be added - exit from the retry loop
+          log.debug("No fields to add to the schema.");
+          break;
+        }
+        if (log.isDebugEnabled()) {
+          StringBuilder builder = new StringBuilder();
+          builder.append("Fields to be added to the schema: [");
+          boolean isFirst = true;
+          for (SchemaField field : newFields) {
+            builder.append(isFirst ? "" : ",");
+            isFirst = false;
+            builder.append(field.getName());
+            builder.append("{type=").append(field.getType().getTypeName()).append("}");
+          }
+          builder.append("]");
+          log.debug(builder.toString());
+        }
+        try {
+          IndexSchema newSchema = oldSchema.addFields(newFields);
+          cmd.getReq().getCore().setLatestSchema(newSchema);
+          cmd.getReq().updateSchemaToLatest();
+          log.debug("Successfully added field(s) to the schema.");
+          break; // success - exit from the retry loop
+        } catch(ManagedIndexSchema.FieldExistsException e) {
+          log.debug("At least one field to be added already exists in the schema - retrying.");
+          // No action: at least one field to be added already exists in the schema, so retry 
+        }
+      }
+      super.processAdd(cmd);
+    }                          
+
+    private String mapValueClassesToFieldType(SolrInputField field) {
+      NEXT_TYPE_MAPPING: for (TypeMapping typeMapping : typeMappings) {
+        NEXT_FIELD_VALUE: for (Object fieldValue : field.getValues()) {
+          for (Class<?> valueClass : typeMapping.valueClasses) {
+            if (valueClass.isInstance(fieldValue)) {
+              continue NEXT_FIELD_VALUE;
+            }
+          }
+          // This fieldValue is not an instance of any of this fieldType's valueClass-s
+          continue NEXT_TYPE_MAPPING;
+        }
+        // Success! Each of this field's values is an instance of one of this fieldType's valueClass-s
+        return typeMapping.fieldTypeName;
+      }
+      // At least one of this field's values is not an instance of any configured fieldType's valueClass-s
+      return defaultFieldType;
+    }
+  }
+}
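
The matching rule in mapValueClassesToFieldType() above is strict: a typeMapping wins only if every value of the field is an instance of one of its valueClass-es, mappings are tried in configuration order, and defaultFieldType is the fallback. A standalone sketch of that logic using plain Java types:

    import java.util.Arrays;
    import java.util.List;

    class TypeMatcher {
      static class Mapping {
        final String fieldType;
        final List<Class<?>> valueClasses;
        Mapping(String fieldType, Class<?>... classes) {
          this.fieldType = fieldType;
          this.valueClasses = Arrays.asList(classes);
        }
      }

      // Same control flow as mapValueClassesToFieldType(): the first mapping
      // whose classes cover *every* value wins; otherwise fall back.
      static String mapToFieldType(List<Object> values, List<Mapping> mappings, String defaultType) {
        nextMapping:
        for (Mapping m : mappings) {
          for (Object v : values) {
            boolean matched = false;
            for (Class<?> c : m.valueClasses) {
              if (c.isInstance(v)) { matched = true; break; }
            }
            if (!matched) continue nextMapping;  // one unmatched value disqualifies this mapping
          }
          return m.fieldType;
        }
        return defaultType;
      }

      public static void main(String[] args) {
        List<Mapping> mappings = Arrays.asList(
            new Mapping("tlong", Long.class, Integer.class),
            new Mapping("tdouble", Double.class, Float.class));
        // Mixed Long/Integer values map to "tlong"; one String forces the default.
        System.out.println(mapToFieldType(Arrays.<Object>asList(1L, 2), mappings, "text_general"));   // tlong
        System.out.println(mapToFieldType(Arrays.<Object>asList(1L, "x"), mappings, "text_general")); // text_general
      }
    }
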
diff --git a/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java
new file mode 100644
index 0000000..58969b0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.common.SolrInputField;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+                   
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Abstract subclass of FieldMutatingUpdateProcessor for implementing 
+ * UpdateProcessors that will mutate all individual values of a selected 
+ * field independently.  If not all individual values are acceptable
+ * - i.e., mutateValue(srcVal) returns {@link #SKIP_FIELD_VALUE_LIST_SINGLETON}
+ * for at least one value - then none of the values are mutated:
+ * mutate(srcField) will return srcField.
+ *
+ * @see FieldMutatingUpdateProcessorFactory
+ * @see FieldValueMutatingUpdateProcessor
+ */
+public abstract class AllValuesOrNoneFieldMutatingUpdateProcessor extends FieldMutatingUpdateProcessor {
+
+  private static final Logger log = LoggerFactory.getLogger(AllValuesOrNoneFieldMutatingUpdateProcessor.class);
+
+  public static final Object DELETE_VALUE_SINGLETON = new Object() {
+    @Override
+    public String toString() {
+      return "!!Singleton Object Triggering Value Deletion!!";
+    }
+  };
+
+  public static final Object SKIP_FIELD_VALUE_LIST_SINGLETON= new Object() {
+    @Override
+    public String toString() {
+      return "!!Singleton Object Triggering Skipping Field Mutation!!";
+    }
+  };
+
+
+  public AllValuesOrNoneFieldMutatingUpdateProcessor(FieldNameSelector selector, UpdateRequestProcessor next) {
+    super(selector, next);
+  }
+
+  /**
+   * Mutates individual values of a field as needed, or returns the original 
+   * value.
+   *
+   * @param srcVal a value from a matched field which should be mutated
+   * @return the value to use as a replacement for src, or 
+   *         <code>DELETE_VALUE_SINGLETON</code> to indicate that the value 
+   *         should be removed completely, or
+   *         <code>SKIP_FIELD_VALUE_LIST_SINGLETON</code> to indicate that
+   *         a field value is not consistent with this mutation, in which case
+   *         the entire field is left unmodified
+   * @see #DELETE_VALUE_SINGLETON
+   * @see #SKIP_FIELD_VALUE_LIST_SINGLETON
+   */
+  protected abstract Object mutateValue(final Object srcVal);
+
+  protected final SolrInputField mutate(final SolrInputField srcField) {
+    List<String> messages = null;
+    SolrInputField result = new SolrInputField(srcField.getName());
+    for (final Object srcVal : srcField.getValues()) {
+      final Object destVal = mutateValue(srcVal);
+      if (SKIP_FIELD_VALUE_LIST_SINGLETON == destVal) {
+        log.debug("field '{}' {} value '{}' is not mutatable, so no values will be mutated",
+                  new Object[] { srcField.getName(), srcVal.getClass().getSimpleName(), srcVal });
+        return srcField;
+      }
+      if (DELETE_VALUE_SINGLETON == destVal) {
+        if (log.isDebugEnabled()) {
+          if (null == messages) {
+            messages = new ArrayList<String>();
+          }
+          messages.add(String.format(Locale.ROOT, "removing value from field '%s': %s '%s'", 
+                                     srcField.getName(), srcVal.getClass().getSimpleName(), srcVal));
+        }
+      } else {
+        if (log.isDebugEnabled()) {
+          if (null == messages) {
+            messages = new ArrayList<String>();
+          }
+          messages.add(String.format(Locale.ROOT, "replace value from field '%s': %s '%s' with %s '%s'", 
+                                     srcField.getName(), srcVal.getClass().getSimpleName(), srcVal, 
+                                     destVal.getClass().getSimpleName(), destVal));
+        }
+        result.addValue(destVal, 1.0F);
+      }
+    }
+    result.setBoost(srcField.getBoost());
+    
+    if (null != messages && log.isDebugEnabled()) {
+      for (String message : messages) {
+        log.debug(message);
+      }
+    }
+    return 0 == result.getValueCount() ? null : result;
+  }
+}
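
As a usage illustration, a concrete subclass only implements mutateValue(); returning SKIP_FIELD_VALUE_LIST_SINGLETON for any single value vetoes mutation of the whole field. A hypothetical trimming processor built on the class above:

    import org.apache.solr.update.processor.AllValuesOrNoneFieldMutatingUpdateProcessor;
    import org.apache.solr.update.processor.FieldMutatingUpdateProcessor.FieldNameSelector;
    import org.apache.solr.update.processor.UpdateRequestProcessor;

    // Hypothetical example: trims all String values of a selected field, but
    // leaves the field completely untouched if any value is not a String.
    class TrimAllOrNoneProcessor extends AllValuesOrNoneFieldMutatingUpdateProcessor {

      TrimAllOrNoneProcessor(FieldNameSelector selector, UpdateRequestProcessor next) {
        super(selector, next);
      }

      @Override
      protected Object mutateValue(Object srcVal) {
        if (!(srcVal instanceof String)) {
          // One non-String value means the entire value list is skipped.
          return SKIP_FIELD_VALUE_LIST_SINGLETON;
        }
        String trimmed = ((String) srcVal).trim();
        // A value that trims to nothing is removed rather than kept empty.
        return trimmed.isEmpty() ? DELETE_VALUE_SINGLETON : trimmed;
      }
    }
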
diff --git a/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
index cf8e5ab..679c6bb 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java
@@ -195,25 +195,13 @@
     
     srcSelector = 
       FieldMutatingUpdateProcessor.createFieldNameSelector
-      (core.getResourceLoader(),
-       core,
-       srcInclusions.fieldName,
-       srcInclusions.typeName,
-       srcInclusions.typeClass,
-       srcInclusions.fieldRegex,
-       FieldMutatingUpdateProcessor.SELECT_NO_FIELDS);
+          (core.getResourceLoader(), core, srcInclusions, FieldMutatingUpdateProcessor.SELECT_NO_FIELDS);
 
     for (SelectorParams exc : srcExclusions) {
       srcSelector = FieldMutatingUpdateProcessor.wrap
         (srcSelector,
          FieldMutatingUpdateProcessor.createFieldNameSelector
-         (core.getResourceLoader(),
-          core,
-          exc.fieldName,
-          exc.typeName,
-          exc.typeClass,
-          exc.fieldRegex,
-          FieldMutatingUpdateProcessor.SELECT_NO_FIELDS));
+             (core.getResourceLoader(), core, exc, FieldMutatingUpdateProcessor.SELECT_NO_FIELDS));
     }
   }
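
The simplification above relies on the consolidated createFieldNameSelector() overload (see the FieldMutatingUpdateProcessor changes below) that takes a SelectorParams holder instead of four positional collections. A sketch of the parameter-object shape, loosely mirroring SelectorParams:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.Set;
    import java.util.regex.Pattern;

    // Loose stand-in for SelectorParams: the four selector inputs travel
    // together, and "nothing specified" becomes a single query.
    class SelectorConfig {
      Set<String> fieldName = Collections.emptySet();
      Set<String> typeName = Collections.emptySet();
      Collection<String> typeClass = Collections.emptyList();
      Collection<Pattern> fieldRegex = Collections.emptyList();

      boolean noSelectorsSpecified() {
        return fieldName.isEmpty() && typeName.isEmpty()
            && typeClass.isEmpty() && fieldRegex.isEmpty();
      }
    }
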
 
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index bccc97e..b78766f 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -222,9 +222,9 @@
         // Replica leader = slice.getLeader();
         Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(
             collection, shardId);
-        ZkCoreNodeProps leaderProps = new ZkCoreNodeProps(leaderReplica);
-        String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
-        isLeader = coreNodeName.equals(leaderReplica.getName());
+        isLeader = leaderReplica.getName().equals(
+            req.getCore().getCoreDescriptor().getCloudDescriptor()
+                .getCoreNodeName());
 
         DistribPhase phase =
             DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
@@ -240,10 +240,9 @@
           // so get the replicas...
           forwardToLeader = false;
           List<ZkCoreNodeProps> replicaProps = zkController.getZkStateReader()
-              .getReplicaProps(collection, shardId, coreNodeName,
+              .getReplicaProps(collection, shardId, leaderReplica.getName(),
                   coreName, null, ZkStateReader.DOWN);
 
-          nodes = addSubShardLeaders(coll, shardId, id, doc, nodes);
           if (replicaProps != null) {
             if (nodes == null)  {
             nodes = new ArrayList<Node>(replicaProps.size());
@@ -273,7 +272,7 @@
         } else {
           // I need to forward onto the leader...
           nodes = new ArrayList<Node>(1);
-          nodes.add(new RetryNode(leaderProps, zkController.getZkStateReader(), collection, shardId));
+          nodes.add(new RetryNode(new ZkCoreNodeProps(leaderReplica), zkController.getZkStateReader(), collection, shardId));
           forwardToLeader = true;
         }
 
@@ -287,8 +286,9 @@
     return nodes;
   }
 
-  private List<Node> addSubShardLeaders(DocCollection coll, String shardId, String docId, SolrInputDocument doc, List<Node> nodes) {
+  private List<Node> getSubShardLeaders(DocCollection coll, String shardId, String docId, SolrInputDocument doc) {
     Collection<Slice> allSlices = coll.getSlices();
+    List<Node> nodes = null;
     for (Slice aslice : allSlices) {
       if (Slice.CONSTRUCTION.equals(aslice.getState()))  {
         DocRouter.Range myRange = coll.getSlice(shardId).getRange();
@@ -343,7 +343,9 @@
 
     if (isLeader && !localIsLeader) {
       log.error("ClusterState says we are the leader, but locally we don't think so");
-      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "ClusterState says we are the leader, but locally we don't think so");
+      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+          "ClusterState says we are the leader (" + zkController.getBaseUrl()
+              + "/" + req.getCore().getName() + "), but locally we don't think so. Request came from " + from);
     }
   }
 
@@ -356,16 +358,15 @@
     try {
       Replica leaderReplica = zkController.getZkStateReader().getLeaderRetry(
           collection, shardId);
-      String leaderCoreNodeName = leaderReplica.getName();
-
-      String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
-      isLeader = coreNodeName.equals(leaderCoreNodeName);
+      isLeader = leaderReplica.getName().equals(
+          req.getCore().getCoreDescriptor().getCloudDescriptor()
+              .getCoreNodeName());
 
       // TODO: what if we are no longer the leader?
 
       forwardToLeader = false;
       List<ZkCoreNodeProps> replicaProps = zkController.getZkStateReader()
-          .getReplicaProps(collection, shardId, coreNodeName,
+          .getReplicaProps(collection, shardId, leaderReplica.getName(),
               req.getCore().getName());
       if (replicaProps != null) {
         nodes = new ArrayList<Node>(replicaProps.size());
@@ -373,8 +374,6 @@
           nodes.add(new StdNode(props));
         }
       }
-
-      nodes = addSubShardLeaders(zkController.getClusterState().getCollection(collection), shardId, null, null, nodes);
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "",
@@ -395,7 +394,7 @@
     } else {
       isLeader = getNonZkLeaderAssumption(req);
     }
-    
+
     boolean dropCmd = false;
     if (!forwardToLeader) {
       dropCmd = versionAdd(cmd);
@@ -405,22 +404,35 @@
       // TODO: do we need to add anything to the response?
       return;
     }
-    
+
+    if (zkEnabled && isLeader)  {
+      DocCollection coll = zkController.getClusterState().getCollection(collection);
+      List<Node> subShardLeaders = getSubShardLeaders(coll, cloudDesc.getShardId(), cmd.getHashableId(), cmd.getSolrInputDocument());
+      // the list<node> will actually have only one element for an add request
+      if (subShardLeaders != null && !subShardLeaders.isEmpty()) {
+        ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
+        params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
+        params.set("distrib.from", ZkCoreNodeProps.getCoreUrl(
+            zkController.getBaseUrl(), req.getCore().getName()));
+        params.set("distrib.from.parent", req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId());
+        for (Node subShardLeader : subShardLeaders) {
+          cmdDistrib.syncAdd(cmd, subShardLeader, params);
+        }
+      }
+    }
+
     ModifiableSolrParams params = null;
     if (nodes != null) {
-      
+
       params = new ModifiableSolrParams(filterParams(req.getParams()));
-      params.set(DISTRIB_UPDATE_PARAM, 
-                 (isLeader ? 
-                  DistribPhase.FROMLEADER.toString() : 
+      params.set(DISTRIB_UPDATE_PARAM,
+                 (isLeader ?
+                  DistribPhase.FROMLEADER.toString() :
                   DistribPhase.TOLEADER.toString()));
       if (isLeader) {
         params.set("distrib.from", ZkCoreNodeProps.getCoreUrl(
             zkController.getBaseUrl(), req.getCore().getName()));
       }
-      if (forwardToSubShard)  {
-        params.set("distrib.from.parent", req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId());
-      }
 
       params.set("distrib.from", ZkCoreNodeProps.getCoreUrl(
           zkController.getBaseUrl(), req.getCore().getName()));
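
The new block above handles shard splits: while a sub-shard slice is still in CONSTRUCTION, its leader must also receive each update, sent synchronously via syncAdd() and tagged with distrib.from.parent so the sub-shard can distinguish parent-forwarded traffic. A condensed sketch of the routing test, with simplified stand-ins for the cluster-state types:

    class SubShardRouting {
      // Only slices still being built receive parent-forwarded updates, and
      // only for documents whose hash falls inside the sub-shard's range.
      static boolean forwardToSubShard(String sliceState, int docHash,
                                       int subRangeMin, int subRangeMax) {
        return "construction".equals(sliceState)
            && subRangeMin <= docHash && docHash <= subRangeMax;
      }
    }
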
@@ -640,7 +652,7 @@
         if (willDistrib) {
           clonedDoc = cmd.solrDoc.deepCopy();
         }
-        
+
         // TODO: possibly set checkDeleteByQueries as a flag on the command?
         doLocalAdd(cmd);
         
@@ -781,13 +793,28 @@
       return;
     }
 
+    if (zkEnabled && isLeader)  {
+      DocCollection coll = zkController.getClusterState().getCollection(collection);
+      List<Node> subShardLeaders = getSubShardLeaders(coll, cloudDesc.getShardId(), null, null);
+      // forward the delete to any sub-shard leaders as well
+      if (subShardLeaders != null && !subShardLeaders.isEmpty()) {
+        ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
+        params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
+        params.set("distrib.from", ZkCoreNodeProps.getCoreUrl(
+            zkController.getBaseUrl(), req.getCore().getName()));
+        params.set("distrib.from.parent", cloudDesc.getShardId());
+        cmdDistrib.syncDelete(cmd, subShardLeaders, params);
+      }
+    }
+
+
     ModifiableSolrParams params = null;
     if (nodes != null) {
-      
+
       params = new ModifiableSolrParams(filterParams(req.getParams()));
-      params.set(DISTRIB_UPDATE_PARAM, 
-                 (isLeader ? 
-                  DistribPhase.FROMLEADER.toString() : 
+      params.set(DISTRIB_UPDATE_PARAM,
+                 (isLeader ?
+                  DistribPhase.FROMLEADER.toString() :
                   DistribPhase.TOLEADER.toString()));
       if (isLeader) {
         params.set("distrib.from", ZkCoreNodeProps.getCoreUrl(
@@ -817,9 +844,11 @@
   }
 
   private void passParam(SolrParams params, ModifiableSolrParams fparams, String param) {
-    String value = params.get(param);
-    if (value != null) {
-      fparams.add(param, value);
+    String[] values = params.getParams(param);
+    if (values != null) {
+      for (String value : values) {
+        fparams.add(param, value);
+      }
     }
   }
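
The passParam() fix above is small but real: SolrParams.get() returns only the first value of a multi-valued parameter, so repeated parameters were silently truncated when forwarded; getParams() copies them all. A tiny sketch of the corrected copy, with a map of arrays standing in for SolrParams:

    import java.util.List;
    import java.util.Map;

    class ParamForwarding {
      // After the fix: every value of the named parameter is forwarded.
      static void passParam(Map<String, String[]> params, List<String> out, String param) {
        String[] values = params.get(param);  // stand-in for SolrParams.getParams(param)
        if (values != null) {
          for (String value : values) {
            out.add(param + "=" + value);
          }
        }
      }
    }
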
 
@@ -843,13 +872,15 @@
     DistribPhase phase = 
     DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
 
+    DocCollection coll = zkEnabled 
+      ? zkController.getClusterState().getCollection(collection) : null;
+
     if (zkEnabled && DistribPhase.NONE == phase) {
       boolean leaderForAnyShard = false;  // start off by assuming we are not a leader for any shard
 
       ModifiableSolrParams outParams = new ModifiableSolrParams(filterParams(req.getParams()));
       outParams.set(DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
 
-      DocCollection coll = zkController.getClusterState().getCollection(collection);
       SolrParams params = req.getParams();
       Collection<Slice> slices = coll.getRouter().getSearchSlices(params.get(ShardParams.SHARD_KEYS), params, coll);
 
@@ -869,7 +900,7 @@
         // Am I the leader for this slice?
         ZkCoreNodeProps coreLeaderProps = new ZkCoreNodeProps(leader);
         String leaderCoreNodeName = leader.getName();
-        String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
+        String coreNodeName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCoreNodeName();
         isLeader = coreNodeName.equals(leaderCoreNodeName);
 
         if (isLeader) {
@@ -957,16 +988,22 @@
       vinfo.unblockUpdates();
     }
 
-
     // forward to all replicas
-    if (leaderLogic && replicas != null) {
+    if (leaderLogic && zkEnabled) {
+      List<Node> subShardLeaders = getSubShardLeaders(coll, cloudDesc.getShardId(), null, null);
+
       ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
       params.set(VERSION_FIELD, Long.toString(cmd.getVersion()));
       params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
       params.set("update.from", ZkCoreNodeProps.getCoreUrl(
           zkController.getBaseUrl(), req.getCore().getName()));
-      cmdDistrib.distribDelete(cmd, replicas, params);
-      cmdDistrib.finish();
+      if (subShardLeaders != null)  {
+        cmdDistrib.syncDelete(cmd, subShardLeaders, params);
+      }
+      if (replicas != null) {
+        cmdDistrib.distribDelete(cmd, replicas, params);
+        cmdDistrib.finish();
+      }
     }
 
 
@@ -1097,15 +1134,37 @@
     }
   }
 
-
   @Override
   public void processCommit(CommitUpdateCommand cmd) throws IOException {
     updateCommand = cmd;
-
+    List<Node> nodes = null;
+    boolean singleLeader = false;
     if (zkEnabled) {
       zkCheck();
+      
+      nodes = getCollectionUrls(req, req.getCore().getCoreDescriptor()
+          .getCloudDescriptor().getCollectionName());
+      if (isLeader && nodes.size() == 1) {
+        singleLeader = true;
+      }
     }
     
+    if (!zkEnabled || req.getParams().getBool(COMMIT_END_POINT, false) || singleLeader) {
+      doLocalCommit(cmd);
+    } else if (zkEnabled) {
+      ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
+      if (!req.getParams().getBool(COMMIT_END_POINT, false)) {
+        params.set(COMMIT_END_POINT, true);
+
+        if (nodes != null) {
+          cmdDistrib.distribCommit(cmd, nodes, params);
+          finish();
+        }
+      }
+    }
+  }
+
+  private void doLocalCommit(CommitUpdateCommand cmd) throws IOException {
     if (vinfo != null) {
       vinfo.lockForUpdate();
     }
@@ -1122,23 +1181,6 @@
         vinfo.unlockForUpdate();
       }
     }
-    // TODO: we should consider this? commit everyone in the current collection
-
-    if (zkEnabled) {
-      ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));
-      if (!req.getParams().getBool(COMMIT_END_POINT, false)) {
-        params.set(COMMIT_END_POINT, true);
-
-        String coreNodeName = zkController.getCoreNodeName(req.getCore().getCoreDescriptor());
-        List<Node> nodes = getCollectionUrls(req, req.getCore().getCoreDescriptor()
-            .getCloudDescriptor().getCollectionName(), coreNodeName);
-
-        if (nodes != null) {
-          cmdDistrib.distribCommit(cmd, nodes, params);
-          finish();
-        }
-      }
-    }
   }
   
   @Override
@@ -1150,7 +1192,7 @@
  
 
   
-  private List<Node> getCollectionUrls(SolrQueryRequest req, String collection, String coreNodeName) {
+  private List<Node> getCollectionUrls(SolrQueryRequest req, String collection) {
     ClusterState clusterState = req.getCore().getCoreDescriptor()
         .getCoreContainer().getZkController().getClusterState();
     List<Node> urls = new ArrayList<Node>();
@@ -1166,7 +1208,7 @@
       
       for (Entry<String,Replica> entry : shardMap.entrySet()) {
         ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
-        if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !entry.getKey().equals(coreNodeName)) {
+        if (clusterState.liveNodesContain(nodeProps.getNodeName())) {
           urls.add(new StdNode(nodeProps));
         }
       }
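
The reworked processCommit() above separates routing from execution: a node commits locally when ZooKeeper is disabled, when the request already carries COMMIT_END_POINT=true, or when it is the collection's only live node; otherwise it distributes the commit once, marked so recipients do not re-forward it. A condensed sketch of that decision with simplified parameters:

    class CommitRouting {
      // Mirrors the branching in processCommit()/doLocalCommit() above.
      static String routeCommit(boolean zkEnabled, boolean commitEndPoint,
                                boolean isLeader, int liveCollectionNodes) {
        boolean singleLeader = zkEnabled && isLeader && liveCollectionNodes == 1;
        if (!zkEnabled || commitEndPoint || singleLeader) {
          return "local-commit";                        // doLocalCommit(cmd)
        }
        return "distrib-commit, COMMIT_END_POINT=true"; // fan out exactly once
      }
    }
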
diff --git a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java
index 993b4b3..92fc82b 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java
@@ -20,11 +20,11 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Set;
 import java.util.regex.Pattern;
 
 import static org.apache.solr.common.SolrException.ErrorCode.BAD_REQUEST;
 import static org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR;
+import static org.apache.solr.update.processor.FieldMutatingUpdateProcessorFactory.SelectorParams;
 
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.SolrInputField;
@@ -108,7 +108,7 @@
         // for now, don't allow it.
         if (! fname.equals(dest.getName()) ) {
           throw new SolrException(SERVER_ERROR,
-                                  "mutute returned field with different name: " 
+                                  "mutate returned field with different name: " 
                                   + fname + " => " + dest.getName());
         }
         doc.put(dest.getName(), dest);
@@ -118,7 +118,7 @@
   }
   
   /**
-   * Interface for idenfifying which fileds should be mutated
+   * Interface for identifying which fields should be mutated
    */
   public static interface FieldNameSelector {
     public boolean shouldMutate(final String fieldName);
@@ -192,79 +192,75 @@
   public static FieldNameSelector createFieldNameSelector
     (final SolrResourceLoader loader,
      final SolrCore core,
-     final Set<String> fields,
-     final Set<String> typeNames,
-     final Collection<String> typeClasses,
-     final Collection<Pattern> regexes,
+     final SelectorParams params,
      final FieldNameSelector defSelector) {
-    
-    final Collection<Class> classes 
-      = new ArrayList<Class>(typeClasses.size());
-    
-    for (String t : typeClasses) {
-      try {
-        classes.add(loader.findClass(t, Object.class));
-      } catch (Exception e) {
-        throw new SolrException(SERVER_ERROR,
-                                "Can't resolve typeClass: " + t, e);
-      }
-    }
-    
-    if (classes.isEmpty() && 
-        typeNames.isEmpty() && 
-        regexes.isEmpty() && 
-        fields.isEmpty()) {
+
+    if (params.noSelectorsSpecified()) {
       return defSelector;
     }
     
-    return new ConfigurableFieldNameSelector(core, fields, typeNames, classes, regexes); 
+    return new ConfigurableFieldNameSelector(loader, core, params); 
   }
   
+  
+  
   private static final class ConfigurableFieldNameSelector 
     implements FieldNameSelector {
 
     final SolrCore core;
-    final Set<String> fields;
-    final Set<String> typeNames;
+    final SelectorParams params;
     final Collection<Class> classes;
-    final Collection<Pattern> regexes;
 
-    private ConfigurableFieldNameSelector(final SolrCore core,
-                                          final Set<String> fields,
-                                          final Set<String> typeNames,
-                                          final Collection<Class> classes,
-                                          final Collection<Pattern> regexes) {
+    private ConfigurableFieldNameSelector(final SolrResourceLoader loader,
+                                          final SolrCore core,
+                                          final SelectorParams params) {
       this.core = core;
-      this.fields = fields;
-      this.typeNames = typeNames;
+      this.params = params;
+
+      final Collection<Class> classes = new ArrayList<Class>(params.typeClass.size());
+
+      for (String t : params.typeClass) {
+        try {
+          classes.add(loader.findClass(t, Object.class));
+        } catch (Exception e) {
+          throw new SolrException(SERVER_ERROR, "Can't resolve typeClass: " + t, e);
+        }
+      }
       this.classes = classes;
-      this.regexes = regexes;
     }
 
     @Override
     public boolean shouldMutate(final String fieldName) {
       
-      // order of checks is bsaed on what should be quicker 
+      // order of checks is based on what should be quicker
       // (ie: set lookups are faster than looping over instanceOf / matches tests)
       
-      if ( ! (fields.isEmpty() || fields.contains(fieldName)) ) {
+      if ( ! (params.fieldName.isEmpty() || params.fieldName.contains(fieldName)) ) {
         return false;
       }
       
       // do not consider it an error if the fieldName has no type
       // there might be another processor dealing with it later
       FieldType t =  core.getLatestSchema().getFieldTypeNoEx(fieldName);
-      if (null != t) {
-        if (! (typeNames.isEmpty() || typeNames.contains(t.getTypeName())) ) {
+      final boolean fieldExists = (null != t);
+
+      if ( (null != params.fieldNameMatchesSchemaField) &&
+           (fieldExists != params.fieldNameMatchesSchemaField) ) {
+        return false;
+      }
+
+      if (fieldExists) { 
+
+        if (! (params.typeName.isEmpty() || params.typeName.contains(t.getTypeName())) ) {
           return false;
         }
         
         if (! (classes.isEmpty() || instanceOfAny(t, classes)) ) {
           return false;
-          }
-      }
+        }
+      } 
       
-      if (! (regexes.isEmpty() || matchesAny(fieldName, regexes)) ) {
+      if (! (params.fieldRegex.isEmpty() || matchesAny(fieldName, params.fieldRegex)) ) {
         return false;
       }
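
After the refactor, ConfigurableFieldNameSelector applies its checks cheapest-first: the exact field-name set, then schema presence (fieldNameMatchesSchemaField), then type name, then the typeClass instanceof tests, and the regexes last. A compact sketch of that ordering against plain Java collections, with the instanceof checks omitted for brevity; SchemaLookup is a hypothetical stand-in for core.getLatestSchema().getFieldTypeNoEx:

import java.util.Collection;
import java.util.Set;
import java.util.regex.Pattern;

// Sketch only: mirrors the shouldMutate() check order from the hunk above.
final class SelectorOrderSketch {
  interface SchemaLookup { String typeNameOf(String field); } // null => no such schema field

  static boolean shouldMutate(String field, Set<String> names, Boolean mustMatchSchema,
                              Set<String> typeNames, Collection<Pattern> regexes,
                              SchemaLookup schema) {
    if (!names.isEmpty() && !names.contains(field)) return false;           // 1. set lookup
    String typeName = schema.typeNameOf(field);
    boolean exists = (typeName != null);
    if (mustMatchSchema != null && exists != mustMatchSchema) return false; // 2. schema presence
    if (exists && !typeNames.isEmpty() && !typeNames.contains(typeName)) return false; // 3. type name
    for (Pattern p : regexes) {                                             // 4. regexes last
      if (p.matcher(field).matches()) return true;
    }
    return regexes.isEmpty();
  }
}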
       
diff --git a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
index 9f1eb72..ad55e70 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java
@@ -58,6 +58,15 @@
  * <b>at least one of each</b> to be selected.
  * </p>
  * <p>
+ * The following additional selector may be specified as a &lt;bool&gt; - when specified
+ * as false, only fields that <b>do not</b> match a schema field/dynamic field are selected;
+ * when specified as true, only fields that <b>do</b> match a schema field/dynamic field are
+ * selected:
+ * </p>
+ * <ul>
+ *   <li><code>fieldNameMatchesSchemaField</code> - selecting specific fields based on whether or not they match a schema field</li>
+ * </ul>
+ * <p>
  * One or more <code>excludes</code> &lt;lst&gt; params may also be specified, 
  * containing any of the above criteria, identifying fields to be excluded 
 * from selection even if they match the selection criteria.  As with the main 
@@ -71,7 +80,7 @@
  * fields will be mutated if the name starts with "foo" <i>or</i> "bar"; 
  * <b>unless</b> the field name contains the substring "SKIP" <i>or</i> 
  * the fieldType is (or subclasses) DateField.  Meaning a field named 
- * "foo_SKIP" is gaurunteed not to be selected, but a field named "bar_smith" 
+ * "foo_SKIP" is guaranteed not to be selected, but a field named "bar_smith" 
  * that uses StrField will be selected.
  * </p>
  * <pre class="prettyprint">
@@ -106,6 +115,13 @@
     public Set<String> typeName = Collections.emptySet();
     public Collection<String> typeClass = Collections.emptyList();
     public Collection<Pattern> fieldRegex = Collections.emptyList();
+    public Boolean fieldNameMatchesSchemaField = null; // null => not specified
+
+    public boolean noSelectorsSpecified() {
+      return typeClass.isEmpty()  && typeName.isEmpty() 
+          && fieldRegex.isEmpty() && fieldName.isEmpty() 
+          && null == fieldNameMatchesSchemaField;
+    }
   }
 
   private SelectorParams inclusions = new SelectorParams();
@@ -121,7 +137,6 @@
                             " inform(SolrCore) never called???");
   }
 
-  @SuppressWarnings("unchecked")
   public static SelectorParams parseSelectorParams(NamedList args) {
     SelectorParams params = new SelectorParams();
     
@@ -145,13 +160,41 @@
     // resolve this into actual Class objects later
     params.typeClass = oneOrMany(args, "typeClass");
 
+    // getBooleanArg() returns null if the arg is not specified
+    params.fieldNameMatchesSchemaField = getBooleanArg(args, "fieldNameMatchesSchemaField");
+    
     return params;
   }
-                                                            
+                               
+  public static Collection<SelectorParams> parseSelectorExclusionParams(NamedList args) {
+    Collection<SelectorParams> exclusions = new ArrayList<SelectorParams>();
+    List<Object> excList = args.getAll("exclude");
+    for (Object excObj : excList) {
+      if (null == excObj) {
+        throw new SolrException
+            (SERVER_ERROR, "'exclude' init param can not be null");
+      }
+      if (! (excObj instanceof NamedList) ) {
+        throw new SolrException
+            (SERVER_ERROR, "'exclude' init param must be <lst/>");
+      }
+      NamedList exc = (NamedList) excObj;
+      exclusions.add(parseSelectorParams(exc));
+      if (0 < exc.size()) {
+        throw new SolrException(SERVER_ERROR,
+            "Unexpected 'exclude' init sub-param(s): '" +
+                args.getName(0) + "'");
+      }
+      // call once per instance
+      args.remove("exclude");
+    }
+    return exclusions;
+  }
+  
 
   /**
    * Handles common initialization related to source fields for 
-   * constructoring the FieldNameSelector to be used.
+   * constructing the FieldNameSelector to be used.
    *
    * Will error if any unexpected init args are found, so subclasses should
    * remove any subclass-specific init args before calling this method.
@@ -161,27 +204,8 @@
   public void init(NamedList args) {
 
     inclusions = parseSelectorParams(args);
+    exclusions = parseSelectorExclusionParams(args);
 
-    List<Object> excList = args.getAll("exclude");
-    for (Object excObj : excList) {
-      if (null == excObj) {
-        throw new SolrException
-          (SERVER_ERROR, "'exclude' init param can not be null"); 
-      }
-      if (! (excObj instanceof NamedList) ) {
-        throw new SolrException
-          (SERVER_ERROR, "'exclude' init param must be <lst/>"); 
-      }
-      NamedList exc = (NamedList) excObj;
-      exclusions.add(parseSelectorParams(exc));
-      if (0 < exc.size()) {
-        throw new SolrException(SERVER_ERROR, 
-                                "Unexpected 'exclude' init sub-param(s): '" + 
-                                args.getName(0) + "'");
-      }
-      // call once per instance
-      args.remove("exclude");
-    }
     if (0 < args.size()) {
       throw new SolrException(SERVER_ERROR, 
                               "Unexpected init param(s): '" + 
@@ -195,25 +219,13 @@
     
     selector = 
       FieldMutatingUpdateProcessor.createFieldNameSelector
-      (core.getResourceLoader(),
-       core,
-       inclusions.fieldName,
-       inclusions.typeName,
-       inclusions.typeClass,
-       inclusions.fieldRegex,
-       getDefaultSelector(core));
+          (core.getResourceLoader(), core, inclusions, getDefaultSelector(core));
 
     for (SelectorParams exc : exclusions) {
       selector = FieldMutatingUpdateProcessor.wrap
         (selector,
          FieldMutatingUpdateProcessor.createFieldNameSelector
-         (core.getResourceLoader(),
-          core,
-          exc.fieldName,
-          exc.typeName,
-          exc.typeClass,
-          exc.fieldRegex,
-          FieldMutatingUpdateProcessor.SELECT_NO_FIELDS));
+             (core.getResourceLoader(), core, exc, FieldMutatingUpdateProcessor.SELECT_NO_FIELDS));
     }
   }
   
@@ -270,7 +282,28 @@
     return result;
   }
 
+  /**
+   * Removes the first instance of the key from NamedList, returning the Boolean
+   * that key referred to, or null if the key is not specified.
+   * @exception SolrException invalid type or structure
+   */
+  public static Boolean getBooleanArg(final NamedList args, final String key) {
+    Boolean bool;
+    List values = args.getAll(key);
+    if (0 == values.size()) {
+      return null;
+    }
+    if (values.size() > 1) {
+      throw new SolrException(SERVER_ERROR, "Only one '" + key + "' is allowed");
+    }
+    Object o = args.remove(key);
+    if (o instanceof Boolean) {
+      bool = (Boolean)o;
+    } else if (o instanceof CharSequence) {
+      bool = Boolean.parseBoolean(o.toString());
+    } else {
+      throw new SolrException(SERVER_ERROR, "'" + key + "' must have type 'bool' or 'str'; found " + o.getClass());
+    }
+    return bool;
+  }
 }
-
-
-
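
The new getBooleanArg helper accepts the selector flag either as a <bool> or as a <str>, and removes the arg from the NamedList so the trailing unexpected-param check in init() stays clean. A quick illustration, assuming only the signatures shown in the hunk above:

import org.apache.solr.common.util.NamedList;
import org.apache.solr.update.processor.FieldMutatingUpdateProcessorFactory;

// Sketch only: exercises getBooleanArg as declared above.
final class BooleanArgSketch {
  public static void main(String[] argv) {
    NamedList args = new NamedList();
    args.add("fieldNameMatchesSchemaField", "true");   // the <str>true</str> form
    Boolean flag = FieldMutatingUpdateProcessorFactory
        .getBooleanArg(args, "fieldNameMatchesSchemaField");
    System.out.println(flag + " remaining=" + args.size()); // true remaining=0
  }
}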
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..9dc0e38
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.BoolField;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IndexSchema;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Boolean values.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that do match a schema field and have
+ * a field type that uses class solr.BoolField.
+ * </p>
+ * <p>
+ * If all values are parseable as boolean (or are already Boolean), then the field
+ * will be mutated, replacing each value with its parsed Boolean equivalent; 
+ * otherwise, no mutation will occur.
+ * </p>
+ * <p>
+ * The default true and false values are "true" and "false", respectively, and match
+ * case-insensitively.  The following configuration changes the acceptable values, and
+ * requires a case-sensitive match - note that either individual &lt;str&gt; elements
+ * or &lt;arr&gt;-s of &lt;str&gt; elements may be used to specify the trueValue-s
+ * and falseValue-s:
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseBooleanFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="caseSensitive"&gt;true&lt;/str&gt;
+ *   &lt;str name="trueValue"&gt;True&lt;/str&gt;
+ *   &lt;str name="trueValue"&gt;Yes&lt;/str&gt;
+ *   &lt;arr name="falseValue"&gt;
+ *     &lt;str&gt;False&lt;/str&gt;
+ *     &lt;str&gt;No&lt;/str&gt;
+ *   &lt;/arr&gt;
+ * &lt;/processor&gt;</pre>
+ */
+public class ParseBooleanFieldUpdateProcessorFactory extends FieldMutatingUpdateProcessorFactory {
+  private static final String TRUE_VALUES_PARAM = "trueValue";
+  private static final String FALSE_VALUES_PARAM = "falseValue";
+  private static final String CASE_SENSITIVE_PARAM = "caseSensitive";
+  
+  private Set<String> trueValues = new HashSet<String>(Arrays.asList(new String[] { "true" }));
+  private Set<String> falseValues = new HashSet<String>(Arrays.asList(new String[] { "false" }));
+  private boolean caseSensitive = false;
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req, 
+                                            SolrQueryResponse rsp, 
+                                            UpdateRequestProcessor next) {
+    return new AllValuesOrNoneFieldMutatingUpdateProcessor(getSelector(), next) {
+      @Override
+      protected Object mutateValue(Object srcVal) {
+        if (srcVal instanceof CharSequence) {
+          String stringVal = caseSensitive ? srcVal.toString() : srcVal.toString().toLowerCase(Locale.ROOT);
+          if (trueValues.contains(stringVal)) {
+            return true;
+          } else if (falseValues.contains(stringVal)) {
+            return false;
+          } else {
+            return SKIP_FIELD_VALUE_LIST_SINGLETON;
+          }
+        }
+        if (srcVal instanceof Boolean) {
+          return srcVal;
+        }
+        return SKIP_FIELD_VALUE_LIST_SINGLETON;
+      }
+    };
+  }
+
+  @Override
+  public void init(NamedList args) {
+    Object caseSensitiveParam = args.remove(CASE_SENSITIVE_PARAM);
+    if (null != caseSensitiveParam) {
+      if (caseSensitiveParam instanceof Boolean) {
+        caseSensitive = (Boolean)caseSensitiveParam;
+      } else {
+        caseSensitive = Boolean.valueOf(caseSensitiveParam.toString());
+      }
+    }
+
+    Collection<String> trueValuesParam = oneOrMany(args, TRUE_VALUES_PARAM);
+    if ( ! trueValuesParam.isEmpty()) {
+      trueValues.clear();
+      for (String trueVal : trueValuesParam) {
+        trueValues.add(caseSensitive ? trueVal : trueVal.toLowerCase(Locale.ROOT));
+      }
+    }
+
+    Collection<String> falseValuesParam = oneOrMany(args, FALSE_VALUES_PARAM);
+    if ( ! falseValuesParam.isEmpty()) {
+      falseValues.clear();
+      for (String val : falseValuesParam) {
+        final String falseVal = caseSensitive ? val : val.toLowerCase(Locale.ROOT);
+        if (trueValues.contains(falseVal)) {
+          throw new SolrException(ErrorCode.SERVER_ERROR,
+              "Param '" + FALSE_VALUES_PARAM + "' contains a value also in param '" + TRUE_VALUES_PARAM
+                  + "': '" + val + "'");
+        }
+        falseValues.add(falseVal);
+      }
+    }
+    super.init(args);
+  }
+
+
+  /**
+   * Returns true if the field doesn't match any schema field or dynamic field,
+   *           or if the matched field's type is BoolField
+   */
+  @Override
+  public FieldMutatingUpdateProcessor.FieldNameSelector
+  getDefaultSelector(final SolrCore core) {
+
+    return new FieldMutatingUpdateProcessor.FieldNameSelector() {
+      @Override
+      public boolean shouldMutate(final String fieldName) {
+        final IndexSchema schema = core.getLatestSchema();
+        FieldType type = schema.getFieldTypeNoEx(fieldName);
+        return (null == type) || (type instanceof BoolField);
+      }
+    };
+  }
+}
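
The factory's value-level rule, pulled out of the anonymous processor above for readability: Booleans pass through, recognized strings are converted, and anything else returns the skip marker so the all-values-or-none wrapper leaves the whole field untouched. A standalone sketch, minus the Solr plumbing:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

// Sketch only: the parse rule from mutateValue() above.
final class BoolParseSketch {
  static final Object SKIP = new Object(); // stands in for SKIP_FIELD_VALUE_LIST_SINGLETON

  static Object mutate(Object srcVal, Set<String> trues, Set<String> falses, boolean caseSensitive) {
    if (srcVal instanceof Boolean) return srcVal;          // already parsed
    if (srcVal instanceof CharSequence) {
      String s = caseSensitive ? srcVal.toString() : srcVal.toString().toLowerCase(Locale.ROOT);
      if (trues.contains(s))  return Boolean.TRUE;
      if (falses.contains(s)) return Boolean.FALSE;
    }
    return SKIP; // one unparseable value means the field is left as-is
  }

  public static void main(String[] args) {
    Set<String> t = new HashSet<String>(Arrays.asList("true"));
    Set<String> f = new HashSet<String>(Arrays.asList("false"));
    System.out.println(mutate("TRUE", t, f, false));           // true (case-insensitive default)
    System.out.println(mutate("maybe", t, f, false) == SKIP);  // true => field skipped
  }
}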
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..05aecbf
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.commons.lang.LocaleUtils;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.DateValueFieldType;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IndexSchema;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Date values.  Solr will continue to index date/times in the UTC time
+ * zone, but the input date/times may be expressed using other time zones,
+ * and will be converted to UTC when they are mutated.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that match a schema field with a field 
+ * type that uses class solr.DateField or a sub-class, including solr.TrieDateField.
+ * </p>
+ * <p>
+ * If all values are parseable as dates (or are already Date), then the field will
+ * be mutated, replacing each value with its parsed Date equivalent; otherwise, no
+ * mutation will occur.
+ * </p>
+ * <p>
+ * One or more date "format" specifiers must be specified.  See 
+ * <a href="http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html"
+ * >Joda-time's DateTimeFormat javadocs</a> for a description of format strings.
+ * </p>
+ * <p>
+ * A default time zone name or offset may optionally be specified for those dates
+ * that don't include an explicit zone/offset.  NOTE: three-letter zone
+ * designations like "EST" are not parseable (with the single exception of "UTC"),
+ * because they are ambiguous.  If no default time zone is specified, UTC will be
+ * used. See <a href="http://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
+ * >Wikipedia's list of TZ database time zone names</a>.
+ * </p>
+ * <p>
+ * The locale to use when parsing field values using the specified formats may
+ * optionally be specified.  If no locale is configured, then {@link Locale#ROOT}
+ * will be used. The following configuration specifies the French/France locale and
+ * two date formats that will parse the strings "le mardi 8 janvier 2013" and 
+ * "le 28 déc. 2010 à 15 h 30", respectively.  Note that either individual &lt;str&gt;
+ * elements or &lt;arr&gt;-s of &lt;str&gt; elements may be used to specify the
+ * date format(s):
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseDateFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="defaultTimeZone"&gt;Europe/Paris&lt;/str&gt;
+ *   &lt;str name="locale"&gt;fr_FR&lt;/str&gt;
+ *   &lt;arr name="format"&gt;
+ *     &lt;str&gt;'le' EEEE dd MMMM yyyy&lt;/str&gt;
+ *     &lt;str&gt;'le' dd MMM. yyyy 'à' HH 'h' mm&lt;/str&gt;
+ *   &lt;/arr&gt;
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public class ParseDateFieldUpdateProcessorFactory extends FieldMutatingUpdateProcessorFactory {
+  public static final Logger log = LoggerFactory.getLogger(ParseDateFieldUpdateProcessorFactory.class);
+
+  private static final String FORMATS_PARAM = "format";
+  private static final String DEFAULT_TIME_ZONE_PARAM = "defaultTimeZone";
+  private static final String LOCALE_PARAM = "locale";
+
+  private Map<String,DateTimeFormatter> formats = new LinkedHashMap<String,DateTimeFormatter>();
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new AllValuesOrNoneFieldMutatingUpdateProcessor(getSelector(), next) {
+      @Override
+      protected Object mutateValue(Object srcVal) {
+        if (srcVal instanceof CharSequence) {
+          String srcStringVal = srcVal.toString();
+          for (Map.Entry<String,DateTimeFormatter> format : formats.entrySet()) {
+            DateTimeFormatter parser = format.getValue();
+            try {
+              DateTime dateTime = parser.parseDateTime(srcStringVal);
+              return dateTime.withZone(DateTimeZone.UTC).toDate();
+            } catch (IllegalArgumentException e) {
+              log.debug("value '{}' is not parseable with format '{}'",
+                        new Object[] { srcStringVal, format.getKey() });
+            }
+          }
+          log.debug("value '{}' was not parsed by any configured format, thus was not mutated", srcStringVal);
+          return SKIP_FIELD_VALUE_LIST_SINGLETON;
+        }
+        if (srcVal instanceof Date) {
+          return srcVal;
+        }
+        return SKIP_FIELD_VALUE_LIST_SINGLETON;
+      }
+    };
+  }
+
+  @Override
+  public void init(NamedList args) {
+    
+    Locale locale = Locale.ROOT;
+    
+    String localeParam = (String)args.remove(LOCALE_PARAM);
+    if (null != localeParam) {
+      locale = LocaleUtils.toLocale(localeParam);
+    }
+
+    Object defaultTimeZoneParam = args.remove(DEFAULT_TIME_ZONE_PARAM);
+    DateTimeZone defaultTimeZone = DateTimeZone.UTC;
+    if (null != defaultTimeZoneParam) {
+      defaultTimeZone = DateTimeZone.forID(defaultTimeZoneParam.toString());
+    }
+
+    Collection<String> formatsParam = oneOrMany(args, FORMATS_PARAM);
+    if (null != formatsParam) {
+      for (String value : formatsParam) {
+        formats.put(value, DateTimeFormat.forPattern(value).withZone(defaultTimeZone).withLocale(locale));
+      }
+    }
+    super.init(args);
+  }
+
+  /**
+   * Returns true if the field doesn't match any schema field or dynamic field,
+ *           or if the matched field's type is DateValueFieldType
+   */
+  @Override
+  public FieldMutatingUpdateProcessor.FieldNameSelector
+  getDefaultSelector(final SolrCore core) {
+
+    return new FieldMutatingUpdateProcessor.FieldNameSelector() {
+      @Override
+      public boolean shouldMutate(final String fieldName) {
+        final IndexSchema schema = core.getLatestSchema();
+        FieldType type = schema.getFieldTypeNoEx(fieldName);
+        return (null == type) || type instanceof DateValueFieldType;
+      }
+    };
+  }
+}
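
To see one configured format in action, the same Joda-time calls the factory makes (forPattern / withZone / withLocale, then conversion to a UTC java.util.Date) can be run standalone. Joda-time on the classpath is assumed; the pattern is the first one from the javadoc example:

import java.util.Date;
import java.util.Locale;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

// Sketch only: one format entry as built in init() and used in mutateValue() above.
public class DateParseSketch {
  public static void main(String[] args) {
    DateTimeFormatter parser = DateTimeFormat.forPattern("'le' EEEE dd MMMM yyyy")
        .withZone(DateTimeZone.forID("Europe/Paris"))   // defaultTimeZone
        .withLocale(new Locale("fr", "FR"));            // locale
    Date utc = parser.parseDateTime("le mardi 8 janvier 2013")
        .withZone(DateTimeZone.UTC).toDate();           // Solr indexes date/times in UTC
    System.out.println(utc);
  }
}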
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseDoubleFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseDoubleFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..07c984c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseDoubleFieldUpdateProcessorFactory.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.DoubleValueFieldType;
+import org.apache.solr.schema.FieldType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.math.RoundingMode;
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.Locale;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Double values.  If required, rounding uses ceiling mode:
+ * {@link RoundingMode#CEILING}.  Grouping separators (',' in the ROOT locale)
+ * are parsed.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that match a schema field with a field 
+ * type that uses class solr.DoubleField, solr.TrieDoubleField, or 
+ * solr.SortableDoubleField.
+ * </p>
+ * <p>
+ * If all values are parseable as double (or are already Double), then the field
+ * will be mutated, replacing each value with its parsed Double equivalent; 
+ * otherwise, no mutation will occur.
+ * </p>
+ * <p>
+ * The locale to use when parsing field values, which will affect the recognized
+ * grouping separator and decimal characters, may optionally be specified.  If
+ * no locale is configured, then {@link Locale#ROOT} will be used.  The following
+ * configuration specifies the Russian/Russia locale, which will parse the
+ * string "12 345,899" as double value 12345.899 (the grouping separator
+ * character is U+00A0 NO-BREAK SPACE).
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseDoubleFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="locale"&gt;ru_RU&lt;/str&gt;
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public class ParseDoubleFieldUpdateProcessorFactory extends ParseNumericFieldUpdateProcessorFactory {
+
+  private static final Logger log = LoggerFactory.getLogger(ParseDoubleFieldUpdateProcessorFactory.class);
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new ParseDoubleFieldUpdateProcessor(getSelector(), locale, next); 
+  }
+
+  private static final class ParseDoubleFieldUpdateProcessor extends AllValuesOrNoneFieldMutatingUpdateProcessor {
+    private final Locale locale;
+    // NumberFormat instances are not thread safe
+    private final ThreadLocal<NumberFormat> numberFormat = new ThreadLocal<NumberFormat>() {
+      @Override
+      protected NumberFormat initialValue() {
+        NumberFormat format = NumberFormat.getInstance(locale);
+        format.setParseIntegerOnly(false);
+        format.setRoundingMode(RoundingMode.CEILING);
+        return format;
+      }
+    };
+
+    ParseDoubleFieldUpdateProcessor(FieldNameSelector selector, Locale locale, UpdateRequestProcessor next) {
+      super(selector, next);
+      this.locale = locale;
+    }
+
+    @Override
+    protected Object mutateValue(Object srcVal) {
+      if (srcVal instanceof CharSequence) {
+        String stringVal = srcVal.toString(); 
+        ParsePosition pos = new ParsePosition(0);
+        Number number = numberFormat.get().parse(stringVal, pos);
+        if (pos.getIndex() != stringVal.length()) {
+          log.debug("value '{}' is not parseable, thus not mutated; unparsed chars: '{}'",
+                    new Object[] { srcVal, stringVal.substring(pos.getIndex())});
+          return SKIP_FIELD_VALUE_LIST_SINGLETON;
+        }
+        return number.doubleValue();
+      }
+      if (srcVal instanceof Double) {
+        return srcVal;
+      }
+      return SKIP_FIELD_VALUE_LIST_SINGLETON;
+    }
+  }
+
+  @Override
+  protected boolean isSchemaFieldTypeCompatible(FieldType type) {
+    return type instanceof DoubleValueFieldType;
+  }
+}
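
The core of the numeric processors is the full-consumption check: NumberFormat.parse(String, ParsePosition) parses a prefix of the input, so the factory only mutates when the position reaches the end of the string. A standalone sketch in the ROOT locale:

import java.text.NumberFormat;
import java.text.ParsePosition;
import java.util.Locale;

// Sketch only: the parse-and-verify step from mutateValue() above.
public class DoubleParseSketch {
  public static void main(String[] args) {
    NumberFormat format = NumberFormat.getInstance(Locale.ROOT);
    format.setParseIntegerOnly(false);
    String bad  = "1,234.5xyz";   // grouping separators accepted, trailing junk is not
    String good = "1,234.5";
    ParsePosition pos = new ParsePosition(0);
    Number n = format.parse(bad, pos);
    // parse() stops at the first unparseable char; only a full consume mutates
    System.out.println(n + " fullyParsed=" + (pos.getIndex() == bad.length()));   // 1234.5 fullyParsed=false
    pos = new ParsePosition(0);
    n = format.parse(good, pos);
    System.out.println(n.doubleValue() + " fullyParsed=" + (pos.getIndex() == good.length())); // 1234.5 fullyParsed=true
  }
}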
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseFloatFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseFloatFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..b085b80
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseFloatFieldUpdateProcessorFactory.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.FloatValueFieldType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.math.RoundingMode;
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.Locale;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Float values.  If required, rounding uses ceiling mode:
+ * {@link RoundingMode#CEILING}.  Grouping separators (',' in the ROOT locale)
+ * are parsed.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that match a schema field with a field 
+ * type that uses class solr.FloatField, solr.TrieFloatField, or 
+ * solr.SortableFloatField.
+ * </p>
+ * <p>
+ * If all values are parseable as float (or are already Float), then the field
+ * will be mutated, replacing each value with its parsed Float equivalent; 
+ * otherwise, no mutation will occur.
+ * </p>
+ * <p>
+ * The locale to use when parsing field values, which will affect the recognized
+ * grouping separator and decimal characters, may optionally be specified.  If
+ * no locale is configured, then {@link Locale#ROOT} will be used. The following
+ * configuration specifies the Russian/Russia locale, which will parse the string
+ * "12 345,899" as 12345.899f (the grouping separator character is U+00AO NO-BREAK
+ * SPACE).
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseFloatFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="locale"&gt;ru_RU&lt;/str&gt;
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public class ParseFloatFieldUpdateProcessorFactory extends ParseNumericFieldUpdateProcessorFactory {
+
+  private static final Logger log = LoggerFactory.getLogger(ParseFloatFieldUpdateProcessorFactory.class);
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new ParseFloatFieldUpdateProcessor(getSelector(), locale, next);
+  }
+
+  private static class ParseFloatFieldUpdateProcessor extends AllValuesOrNoneFieldMutatingUpdateProcessor {
+    private final Locale locale;
+
+    // NumberFormat instances are not thread safe
+    private final ThreadLocal<NumberFormat> numberFormat = new ThreadLocal<NumberFormat>() {
+      @Override
+      protected NumberFormat initialValue() {
+        NumberFormat format = NumberFormat.getInstance(locale);
+        format.setParseIntegerOnly(false);
+        format.setRoundingMode(RoundingMode.CEILING);
+        return format;
+      }
+    };
+
+    ParseFloatFieldUpdateProcessor(FieldNameSelector selector, Locale locale, UpdateRequestProcessor next) {
+      super(selector, next);
+      this.locale = locale;
+    }
+
+    @Override
+    protected Object mutateValue(Object srcVal) {
+      if (srcVal instanceof CharSequence) {
+        String stringVal = srcVal.toString();
+        ParsePosition pos = new ParsePosition(0);
+        Number number = numberFormat.get().parse(stringVal, pos);
+        if (pos.getIndex() != stringVal.length()) {
+          log.debug("value '{}' is not parseable, thus not mutated; unparsed chars: '{}'",
+              new Object[] { srcVal, stringVal.substring(pos.getIndex())});
+          return SKIP_FIELD_VALUE_LIST_SINGLETON;
+        }
+        return number.floatValue();
+      }
+      if (srcVal instanceof Float) {
+        return srcVal;
+      }
+      return SKIP_FIELD_VALUE_LIST_SINGLETON;
+    }
+  }
+
+  @Override
+  protected boolean isSchemaFieldTypeCompatible(FieldType type) {
+    return type instanceof FloatValueFieldType;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseIntFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseIntFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..51faad7
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseIntFieldUpdateProcessorFactory.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IntValueFieldType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.Locale;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Integer values.  Grouping separators (',' in the ROOT locale) are parsed.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that match a schema field with a field 
+ * type that uses class solr.IntField, solr.TrieIntField, or
+ * solr.SortableIntField.
+ * </p>
+ * <p>
+ * If all values are parseable as int (or are already Integer), then the field
+ * will be mutated, replacing each value with its parsed Integer equivalent;
+ * otherwise, no mutation will occur.
+ * </p>
+ * <p>
+ * The locale to use when parsing field values, which will affect the recognized
+ * grouping separator character, may optionally be specified.  If no locale is
+ * configured, then {@link Locale#ROOT} will be used. The following configuration
+ * specifies the Russian/Russia locale, which will parse the string "12 345 899"
+ * as integer 12345899 (the grouping separator character is U+00A0 NO-BREAK SPACE).
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseIntFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="locale"&gt;ru_RU&lt;/str&gt;
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public class ParseIntFieldUpdateProcessorFactory extends ParseNumericFieldUpdateProcessorFactory {
+
+  private static final Logger log = LoggerFactory.getLogger(ParseIntFieldUpdateProcessorFactory.class);
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new ParseIntFieldUpdateProcessor(getSelector(), locale, next);
+  }
+
+  private static final class ParseIntFieldUpdateProcessor extends AllValuesOrNoneFieldMutatingUpdateProcessor {
+    private final Locale locale;
+    
+    // NumberFormat instances are not thread safe
+    private final ThreadLocal<NumberFormat> numberFormat = new ThreadLocal<NumberFormat>() {
+      @Override
+      protected NumberFormat initialValue() {
+        NumberFormat format = NumberFormat.getInstance(locale);
+        format.setParseIntegerOnly(true);
+        return format;
+      }
+    };
+
+    ParseIntFieldUpdateProcessor(FieldNameSelector selector, Locale locale, UpdateRequestProcessor next) {
+      super(selector, next);
+      this.locale = locale;
+    }
+
+    @Override
+    protected Object mutateValue(Object srcVal) {
+      if (srcVal instanceof CharSequence) {
+        String stringVal = srcVal.toString();
+        ParsePosition pos = new ParsePosition(0);
+        Number number = numberFormat.get().parse(stringVal, pos);
+        if (pos.getIndex() != stringVal.length()) {
+          log.debug("value '{}' is not parseable, thus not mutated; unparsed chars: '{}'",
+                    new Object[] { srcVal, stringVal.substring(pos.getIndex())});
+          return SKIP_FIELD_VALUE_LIST_SINGLETON;
+        }
+        int intValue = number.intValue();
+        if (number.longValue() == (long)intValue) {
+          // If the high bits don't get truncated by number.intValue()
+          return intValue;
+        }
+        log.debug("value '{}' doesn't fit into an Integer, thus was not mutated", srcVal);
+        return SKIP_FIELD_VALUE_LIST_SINGLETON;
+      }
+      if (srcVal instanceof Integer) {
+        return srcVal;
+      }
+      return SKIP_FIELD_VALUE_LIST_SINGLETON;
+    }
+  }
+
+  @Override
+  protected boolean isSchemaFieldTypeCompatible(FieldType type) {
+    return type instanceof IntValueFieldType;
+  }
+}
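
Unlike the Long variant, the Int factory has to guard against truncation: NumberFormat returns a Long for integral input, and Number.intValue() silently drops the high bits. The longValue() comparison in the hunk above detects exactly that, as this small check shows:

// Sketch only: why mutateValue() compares number.longValue() to the int cast.
public class IntOverflowSketch {
  public static void main(String[] args) {
    long parsed = 4294967297L;          // Integer.MAX_VALUE is 2147483647
    int truncated = (int) parsed;       // high bits dropped: becomes 1
    boolean fits = (parsed == (long) truncated);
    System.out.println(truncated + " fits=" + fits); // 1 fits=false => field skipped
  }
}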
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseLongFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseLongFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..1b0ceb5
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseLongFieldUpdateProcessorFactory.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.LongValueFieldType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.NumberFormat;
+import java.text.ParsePosition;
+import java.util.Locale;
+
+/**
+ * <p>
+ * Attempts to mutate selected fields that have only CharSequence-typed values
+ * into Long values.  Grouping separators (',' in the ROOT locale) are parsed.
+ * </p>
+ * <p>
+ * The default selection behavior is to mutate both those fields that don't match
+ * a schema field, as well as those fields that match a schema field with a field 
+ * type that uses class solr.LongField, solr.TrieLongField, or 
+ * solr.SortableLongField.
+ * </p>
+ * <p>
+ * If all values are parseable as long (or are already Long), then the field
+ * will be mutated, replacing each value with its parsed Long equivalent;
+ * otherwise, no mutation will occur.
+ * </p>
+ * <p>
+ * The locale to use when parsing field values, which will affect the recognized
+ * grouping separator character, may optionally be specified.  If no locale is
+ * configured, then {@link Locale#ROOT} will be used. The following configuration
+ * specifies the Russian/Russia locale, which will parse the string "12 345 899" 
+ * as 12345899L (the grouping separator character is U+00A0 NO-BREAK SPACE).
+ * </p>
+ *
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.ParseLongFieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="locale"&gt;ru_RU&lt;/str&gt;
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public class ParseLongFieldUpdateProcessorFactory extends ParseNumericFieldUpdateProcessorFactory {
+  
+  private static final Logger log = LoggerFactory.getLogger(ParseLongFieldUpdateProcessorFactory.class);
+
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new ParseLongFieldUpdateProcessor(getSelector(), locale, next);
+  }
+
+  private static class ParseLongFieldUpdateProcessor extends AllValuesOrNoneFieldMutatingUpdateProcessor {
+    private final Locale locale;
+    
+    // NumberFormat instances are not thread safe
+    private final ThreadLocal<NumberFormat> numberFormat = new ThreadLocal<NumberFormat>() {
+      @Override
+      protected NumberFormat initialValue() {
+        NumberFormat format = NumberFormat.getInstance(locale);
+        format.setParseIntegerOnly(true);
+        return format;
+      }
+    };
+
+    ParseLongFieldUpdateProcessor(FieldNameSelector selector, Locale locale, UpdateRequestProcessor next) {
+      super(selector, next);
+      this.locale = locale;
+    }
+    
+    @Override
+    protected Object mutateValue(Object srcVal) {
+      if (srcVal instanceof CharSequence) {
+        String stringVal = srcVal.toString();
+        ParsePosition pos = new ParsePosition(0);
+        Number number = numberFormat.get().parse(stringVal, pos);
+        if (pos.getIndex() != stringVal.length()) {
+          log.debug("value '{}' is not parseable, thus not mutated; unparsed chars: '{}'",
+                    new Object[] { srcVal, stringVal.substring(pos.getIndex())});
+          return SKIP_FIELD_VALUE_LIST_SINGLETON;
+        }
+        return number.longValue();
+      }
+      if (srcVal instanceof Long) {
+        return srcVal;
+      }
+      return SKIP_FIELD_VALUE_LIST_SINGLETON;
+    }
+  }
+
+  @Override
+  protected boolean isSchemaFieldTypeCompatible(FieldType type) {
+    return type instanceof LongValueFieldType;
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseNumericFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseNumericFieldUpdateProcessorFactory.java
new file mode 100644
index 0000000..20bc67e
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/ParseNumericFieldUpdateProcessorFactory.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.commons.lang.LocaleUtils;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.IndexSchema;
+
+import java.util.Locale;
+
+/**
+ * Abstract base class for numeric parsing update processor factories.
+ * Subclasses can optionally configure a locale.  If no locale is configured,
+ * then {@link Locale#ROOT} will be used.  E.g. to configure the French/France
+ * locale:
+ * 
+ * <pre class="prettyprint">
+ * &lt;processor class="solr.Parse[Type]FieldUpdateProcessorFactory"&gt;
+ *   &lt;str name="locale"&gt;fr_FR&lt;/str&gt;
+ *   [...]
+ * &lt;/processor&gt;</pre>
+ *
+ * <p>
+ * See {@link Locale} for a description of acceptable language, country (optional)
+ * and variant (optional) values, joined with underscore(s).
+ * </p>
+ */
+public abstract class ParseNumericFieldUpdateProcessorFactory extends FieldMutatingUpdateProcessorFactory {
+
+  private static final String LOCALE_PARAM = "locale";
+
+  protected Locale locale = Locale.ROOT;
+
+  @Override
+  public void init(NamedList args) {
+    String localeParam = (String)args.remove(LOCALE_PARAM);
+    if (null != localeParam) {
+      locale = LocaleUtils.toLocale(localeParam);
+    }
+    super.init(args);
+  }
+
+  /**
+   * Returns true if the given FieldType is compatible with this parsing factory.
+   */
+  protected abstract boolean isSchemaFieldTypeCompatible(FieldType type);  
+
+  /**
+   * Returns true if the field doesn't match any schema field or dynamic field,
+   *           or if the matched field's type is compatible
+   * @param core Where to get the current schema from
+   */
+  @Override
+  public FieldMutatingUpdateProcessor.FieldNameSelector
+  getDefaultSelector(final SolrCore core) {
+
+    return new FieldMutatingUpdateProcessor.FieldNameSelector() {
+      @Override
+      public boolean shouldMutate(final String fieldName) {
+        final IndexSchema schema = core.getLatestSchema();
+        FieldType type = schema.getFieldTypeNoEx(fieldName);
+        return (null == type) || isSchemaFieldTypeCompatible(type);
+      }
+    };
+  }
+}
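
The shared init() leans on commons-lang's LocaleUtils.toLocale to turn the "lang_COUNTRY[_variant]" strings from solrconfig.xml into a Locale. A quick standalone check of that call, assuming commons-lang 2.x as already imported above:

import java.util.Locale;
import org.apache.commons.lang.LocaleUtils;

// Sketch only: the locale parsing used by ParseNumericFieldUpdateProcessorFactory.init().
public class LocaleParseSketch {
  public static void main(String[] args) {
    Locale ru = LocaleUtils.toLocale("ru_RU");
    System.out.println(ru.getLanguage() + "/" + ru.getCountry()); // ru/RU
    // Malformed strings like "ru-RU" throw IllegalArgumentException instead
  }
}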
diff --git a/solr/core/src/java/org/apache/solr/update/processor/UpdateRequestProcessorChain.java b/solr/core/src/java/org/apache/solr/update/processor/UpdateRequestProcessorChain.java
index 282549b..8bd75e6 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/UpdateRequestProcessorChain.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/UpdateRequestProcessorChain.java
@@ -34,23 +34,48 @@
 /**
  * Manages a chain of UpdateRequestProcessorFactories.
  * <p>
- * Chain can be configured via solrconfig.xml:
+ * Chains can be configured via solrconfig.xml using the following syntax...
  * </p>
  * <pre class="prettyprint">
- * &lt;updateRequestProcessors name="key" default="true"&gt;
- *   &lt;processor class="PathToClass1" /&gt;
- *   &lt;processor class="PathToClass2" /&gt;
+ * &lt;updateRequestProcessorChain name="key" default="true"&gt;
+ *   &lt;processor class="package.Class1" /&gt;
+ *   &lt;processor class="package.Class2" &gt;
+ *     &lt;str name="someInitParam1"&gt;value&lt;/str&gt;
+ *     &lt;int name="someInitParam2"&gt;42&lt;/int&gt;
+ *   &lt;/processor&gt;
  *   &lt;processor class="solr.LogUpdateProcessorFactory" &gt;
  *     &lt;int name="maxNumToLog"&gt;100&lt;/int&gt;
  *   &lt;/processor&gt;
  *   &lt;processor class="solr.RunUpdateProcessorFactory" /&gt;
- * &lt;/updateRequestProcessors&gt;
+ * &lt;/updateRequestProcessorChain&gt;
  * </pre>
  * <p>
+ * Multiple Chains can be defined, each with a distinct name.  The name of 
+ * a chain used to handle an update request may be specified using the request 
+ * param <code>update.chain</code>.  If no chain is explicitly selected 
+ * by name, then Solr will attempt to determine a default chain:
+ * </p>
+ * <ul>
+ *  <li>A single configured chain may explicitly be declared with 
+ *      <code>default="true"</code> (see example above)</li>
+ *  <li>If no chain is explicitly declared as the default, Solr will look for
+ *      any chain that does not have a name, and treat it as the default</li>
+ *  <li>As a last resort, Solr will create an implicit default chain 
+ *      consisting of:<ul>
+ *        <li>{@link LogUpdateProcessorFactory}</li>
+ *        <li>{@link DistributedUpdateProcessorFactory}</li>
+ *        <li>{@link RunUpdateProcessorFactory}</li>
+ *      </ul></li>
+ * </ul>
+ *
+ * <p>
 * Almost all processor chains should end with an instance of 
- * {@link RunUpdateProcessorFactory} unless the user is explicitly 
+ * <code>RunUpdateProcessorFactory</code> unless the user is explicitly 
  * executing the update commands in an alternative custom 
- * <code>UpdateRequestProcessorFactory</code>.
+ * <code>UpdateRequestProcessorFactory</code>.  If a chain includes 
+ * <code>RunUpdateProcessorFactory</code> but does not include a 
+ * <code>DistributingUpdateProcessorFactory</code>, it will be added 
+ * automatically by {@link #init init()}.
  * </p>
  *
  * @see UpdateRequestProcessorFactory
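
The default-chain resolution order described in the javadoc above (an explicit default="true" chain, then an unnamed chain, then the implicit Log/Distributed/Run chain) reduces to a three-step lookup. The sketch below is illustrative only; the names are not the actual SolrCore fields:

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch only: the three-step default-chain lookup from the javadoc above.
final class DefaultChainSketch {
  /** chains maps chain name (null for an unnamed chain) to its default flag. */
  static String resolveDefault(Map<String,Boolean> chains) {
    for (Map.Entry<String,Boolean> e : chains.entrySet()) {
      if (Boolean.TRUE.equals(e.getValue())) return e.getKey();  // default="true" wins
    }
    if (chains.containsKey(null)) return null;                   // fall back to the unnamed chain
    return "implicit:Log,Distributed,Run";                       // last-resort implicit chain
  }

  public static void main(String[] args) {
    Map<String,Boolean> chains = new LinkedHashMap<String,Boolean>();
    chains.put("dedupe", Boolean.FALSE);
    System.out.println(resolveDefault(chains)); // implicit:Log,Distributed,Run
  }
}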
diff --git a/solr/core/src/java/org/apache/solr/util/HdfsUtil.java b/solr/core/src/java/org/apache/solr/util/HdfsUtil.java
new file mode 100644
index 0000000..b46af58
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/util/HdfsUtil.java
@@ -0,0 +1,51 @@
+package org.apache.solr.util;
+
+import java.io.File;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class HdfsUtil {
+  
+  private static final String[] HADOOP_CONF_FILES = {"core-site.xml",
+    "hdfs-site.xml", "mapred-site.xml", "yarn-site.xml", "hadoop-site.xml"};
+  
+  public static void addHdfsResources(Configuration conf, String confDir) {
+    if (confDir != null && confDir.length() > 0) {
+      File confDirFile = new File(confDir);
+      if (!confDirFile.exists()) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Resource directory does not exist: " + confDirFile.getAbsolutePath());
+      }
+      if (!confDirFile.isDirectory()) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Specified resource directory is not a directory: " + confDirFile.getAbsolutePath());
+      }
+      if (!confDirFile.canRead()) {
+        throw new SolrException(ErrorCode.SERVER_ERROR, "Resource directory must be readable by the Solr process: " + confDirFile.getAbsolutePath());
+      }
+      for (String file : HADOOP_CONF_FILES) {
+        if (new File(confDirFile, file).exists()) {
+          conf.addResource(new Path(confDir, file));
+        }
+      }
+    }
+  }
+}
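
Typical use of the helper above is at directory-factory init time: point it at a Hadoop conf directory and any of the standard *-site.xml files found there are layered onto the Configuration. The path below is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.solr.util.HdfsUtil;

// Sketch only: feeding an external Hadoop conf dir into a Configuration.
public class HdfsConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    HdfsUtil.addHdfsResources(conf, "/etc/hadoop/conf"); // hypothetical conf dir
    System.out.println(conf.get("fs.defaultFS"));        // picked up from core-site.xml, if present
  }
}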
diff --git a/solr/core/src/java/org/apache/solr/util/IOUtils.java b/solr/core/src/java/org/apache/solr/util/IOUtils.java
new file mode 100644
index 0000000..e7b82ea
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/util/IOUtils.java
@@ -0,0 +1,37 @@
+package org.apache.solr.util;
+
+import java.io.Closeable;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class IOUtils {
+  public static Logger LOG = LoggerFactory.getLogger(IOUtils.class);
+  
+  public static void closeQuietly(Closeable closeable) {
+    try {
+      if (closeable != null) {
+        closeable.close();
+      }
+    } catch (Exception e) {
+      LOG.error("Error while closing", e);
+    }
+  }
+}
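A sketch of the intended calling pattern for closeQuietly, in a finally block so a failed close() is logged rather than thrown; the file name is illustrative:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.solr.util.IOUtils;

public class CloseQuietlyExample {
  public static void main(String[] args) throws IOException {
    InputStream in = null;
    try {
      in = new FileInputStream("example.txt");
      // ... read from the stream ...
    } finally {
      // Never throws: a null stream is ignored and close() failures
      // are logged instead of masking an earlier exception.
      IOUtils.closeQuietly(in);
    }
  }
}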
diff --git a/solr/core/src/test-files/log4j.properties b/solr/core/src/test-files/log4j.properties
index fbc817f..9b74a5f 100644
--- a/solr/core/src/test-files/log4j.properties
+++ b/solr/core/src/test-files/log4j.properties
@@ -7,3 +7,4 @@
 log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
 
 log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-add-schema-fields-update-processor.xml b/solr/core/src/test-files/solr/collection1/conf/schema-add-schema-fields-update-processor.xml
new file mode 100644
index 0000000..2b59472
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-add-schema-fields-update-processor.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<schema name="add-schema-fields-update-processor" version="1.5">
+  <types>
+    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" multiValued="true" positionIncrementGap="0"/>
+    <fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true" multiValued="true"/>
+    <fieldtype name="string" class="solr.StrField" sortMissingLast="true"/>
+    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="text" class="solr.TextField" multiValued="true" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
+  </types>
+  <fields>
+    <field name="id"           type="string"  indexed="true" stored="true" multiValued="false" required="true"/>
+    <field name="_version_"    type="long"    indexed="true" stored="true"/>
+    
+    <dynamicField name="*_t"   type="text"    indexed="true" stored="true"/>
+    <dynamicField name="*_ti"  type="tint"    indexed="true" stored="true"/>
+    <dynamicField name="*_tl"  type="tlong"   indexed="true" stored="true"/>
+    <dynamicField name="*_tf"  type="tfloat"  indexed="true" stored="true"/>
+    <dynamicField name="*_td"  type="tdouble" indexed="true" stored="true"/>
+    <dynamicField name="*_tdt" type="tdate"   indexed="true" stored="true"/>
+  </fields>
+
+  <uniqueKey>id</uniqueKey>
+</schema>
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
index d3a1862..3ab7837 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
@@ -394,6 +394,7 @@
       termVectors="true" termPositions="true" termOffsets="true"/>
    <field name="signatureField" type="string" indexed="true" stored="false"/>
    
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
 
    
    <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml b/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
index 88a6d66..c2a0e60 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
@@ -221,8 +221,6 @@
     <fieldType name="float" class="solr.TrieFloatField" precisionStep="4" omitNorms="true" positionIncrementGap="0"/>
     <fieldType name="long" class="solr.TrieLongField" precisionStep="4" omitNorms="true" positionIncrementGap="0"/>
     <fieldType name="double" class="solr.TrieDoubleField" precisionStep="4" omitNorms="true" positionIncrementGap="0"/>
-    <fieldType name="byte" class="solr.ByteField" omitNorms="true" positionIncrementGap="0"/>
-    <fieldType name="short" class="solr.ShortField" omitNorms="true" positionIncrementGap="0"/>
     <fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true"/>
     <fieldtype name="date" class="solr.TrieDateField" precisionStep="0"/>
   </types>
@@ -233,8 +231,6 @@
     <field name="float_f" type="float"/>
     <field name="long_f" type="long"/>
     <field name="double_f" type="double"/>
-    <field name="byte_f" type="byte"/>
-    <field name="short_f" type="short"/>
     <field name="bool_f" type="boolean"/>
     <field name="date_f" type="date"/>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-luceneMatchVersion.xml b/solr/core/src/test-files/solr/collection1/conf/schema-luceneMatchVersion.xml
index 13e12f3..3bb2b49 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-luceneMatchVersion.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-luceneMatchVersion.xml
@@ -17,6 +17,7 @@
 -->
 <schema name="luceneMatchVersionTest" version="1.1">
  <types>
+  <fieldtype name="long" class="solr.TrieLongField"/>
   <fieldtype name="string" class="solr.StrField"/>
   <fieldtype name="text40" class="solr.TextField">
     <analyzer>
@@ -50,6 +51,7 @@
    <field name="textDefault" type="textDefault" indexed="true" stored="false" />
    <field name="textStandardAnalyzer40" type="textStandardAnalyzer40" indexed="true" stored="false" />
    <field name="textStandardAnalyzerDefault" type="textStandardAnalyzerDefault" indexed="true" stored="false" />
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
    <dynamicField name="*_sS" type="string"  indexed="false" stored="true"/>
  </fields>
  <uniqueKey>id</uniqueKey>
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-one-field-no-dynamic-field-unique-key.xml b/solr/core/src/test-files/solr/collection1/conf/schema-one-field-no-dynamic-field-unique-key.xml
new file mode 100644
index 0000000..783ae77
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-one-field-no-dynamic-field-unique-key.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="one_field_no_dynamic_field_unique_key" version="1.1">
+  <types>
+    <fieldType name="string" class="solr.StrField"/>
+    <fieldType name="text" class="solr.TextField">
+      <analyzer class="org.apache.lucene.analysis.standard.StandardAnalyzer"/>
+    </fieldType>
+  </types>
+  <fields>
+    <field name="str" type="string" indexed="true" stored="true"/>
+  </fields>
+  <uniqueKey>str</uniqueKey>
+</schema>
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
index 9363398..8dea791 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
@@ -376,7 +376,7 @@
    <field name="intDefault" type="sint" indexed="true" stored="true" default="42" multiValued="false"/>
    <field name="signatureField" type="string" indexed="true" stored="false"/>
    
-
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
    
    <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
         will be used if the name matches any of the patterns.
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-reversed.xml b/solr/core/src/test-files/solr/collection1/conf/schema-reversed.xml
index 22844af..40fc0e8 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-reversed.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-reversed.xml
@@ -25,6 +25,7 @@
   <types>
 
 
+    <fieldtype name="long" class="solr.TrieLongField" />
     <fieldtype name="integer" class="solr.IntField" />
     <fieldtype name="string" class="solr.StrField" />
 
@@ -73,6 +74,8 @@
    <field name="two" type="rev" indexed="true" stored="false"/>
    <field name="three" type="text" indexed="true" stored="false"/>
 
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
+
    <field name="signatureField" type="string" indexed="true" stored="false"/>
    <dynamicField name="*_sS" type="string"  indexed="false" stored="true"/>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
index 85ad7b7..1819bfa 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
@@ -282,6 +282,8 @@
 
    <field name="tdate" type="tdate" indexed="true" stored="true" />
 
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
+
    <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
         will be used if the name matches any of the patterns.
         RESTRICTION: the glob-like pattern in the name attribute must have
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema.xml b/solr/core/src/test-files/solr/collection1/conf/schema.xml
index 74525ca..6ab7743 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema.xml
@@ -48,8 +48,6 @@
     <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
     <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
     <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-    <fieldType name="byte" class="solr.ByteField" omitNorms="true" positionIncrementGap="0"/>
-    <fieldType name="short" class="solr.ShortField" omitNorms="true" positionIncrementGap="0"/>
 
 
     <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
@@ -609,10 +607,6 @@
    <dynamicField name="*_dt" type="date"    indexed="true"  stored="true"/>
    <dynamicField name="*_dt1" type="date"    indexed="true"  stored="true" multiValued="false"/>
    <dynamicField name="*_bcd" type="bcdstr" indexed="true"  stored="true"/>
-   <dynamicField name="*_by"  type="byte"  indexed="true" stored="true"/>
-   <dynamicField name="*_by1" type="byte"  indexed="true" stored="true" multiValued="false"/>
-   <dynamicField name="*_sh" type="short"  indexed="true" stored="true"/>
-   <dynamicField name="*_sh1" type="short"  indexed="true" stored="true" multiValued="false"/>
 
 
       <!-- some trie-coded dynamic fields for faster range queries -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema11.xml b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
index 5c2e281..a993cbd 100755
--- a/solr/core/src/test-files/solr/collection1/conf/schema11.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
@@ -322,6 +322,8 @@
 
    <field name="text_no_analyzer" type="text_no_analyzer" indexed="true" />
 
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
+
    <!-- Dynamic field definitions.  If a field name is not found, dynamicFields
         will be used if the name matches any of the patterns.
         RESTRICTION: the glob-like pattern in the name attribute must have
diff --git a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
index 923bb0f..04e90e3 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
@@ -539,6 +539,8 @@
    <field name="uniq3" type="string" indexed="true" stored="true"/>
    <field name="nouniq" type="string" indexed="true" stored="true" multiValued="true"/>
 
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false" />
+
    <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  stored="false"/>
 
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-SOLR-749.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-SOLR-749.xml
index 7785b50..1fabd5c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-SOLR-749.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-SOLR-749.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <queryParser name="foo" class="FooQParserPlugin"/>
   <!-- override the default "lucene" qparser -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
new file mode 100644
index 0000000..9a59d90
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-add-schema-fields-update-processor-chains.xml
@@ -0,0 +1,155 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+   Test Config that enumerates many different parsing update processor chain 
+   configurations.
+  -->
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+
+  <schemaFactory class="ManagedIndexSchemaFactory">
+    <bool name="mutable">true</bool>
+    <str name="managedSchemaResourceName">managed-schema</str>
+  </schemaFactory>
+
+  <updateRequestProcessorChain name="add-fields-no-run-processor">
+    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
+      <str name="defaultFieldType">text</str>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Boolean</str>
+        <str name="fieldType">boolean</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tint</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Float</str>
+        <str name="fieldType">tfloat</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.util.Date</str>
+        <str name="fieldType">tdate</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Long</str>
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tlong</str>
+      </lst>
+      <lst name="typeMapping">
+        <arr name="valueClass">
+          <str>java.lang.Double</str>
+          <str>java.lang.Float</str>
+        </arr>
+        <str name="fieldType">tdouble</str>
+      </lst>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="add-fields">
+    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
+      <str name="defaultFieldType">text</str>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Boolean</str>
+        <str name="fieldType">boolean</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tint</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Float</str>
+        <str name="fieldType">tfloat</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.util.Date</str>
+        <str name="fieldType">tdate</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Long</str>
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tlong</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Number</str>
+        <str name="fieldType">tdouble</str>
+      </lst>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-and-add-fields">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <arr name="format">
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss</str>
+        <str>yyyy-MM-dd'T'HH:mmZ</str>
+        <str>yyyy-MM-dd'T'HH:mm</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd HH:mm:ssZ</str>
+        <str>yyyy-MM-dd HH:mm:ss</str>
+        <str>yyyy-MM-dd HH:mmZ</str>
+        <str>yyyy-MM-dd HH:mm</str>
+        <str>yyyy-MM-dd</str>
+      </arr>
+    </processor>
+    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
+      <str name="defaultFieldType">text</str>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Boolean</str>
+        <str name="fieldType">boolean</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tint</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Float</str>
+        <str name="fieldType">tfloat</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.util.Date</str>
+        <str name="fieldType">tdate</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Long</str>
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tlong</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Number</str>
+        <str name="fieldType">tdouble</str>
+      </lst>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+</config>
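A client can select one of the chains above by name through the update.chain request parameter. A SolrJ sketch, assuming a locally running core at a hypothetical URL; the field values are also made up for illustration:

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class AddFieldsChainExample {
  public static void main(String[] args) throws Exception {
    SolrServer server = new HttpSolrServer("http://localhost:8983/solr/collection1");

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "1");
    doc.addField("price", 4.99f);  // unknown field: mapped to tfloat by the chain

    UpdateRequest req = new UpdateRequest();
    req.setParam("update.chain", "add-fields");  // pick the configured chain
    req.add(doc);
    req.process(server);
    server.commit();
  }
}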
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-altdirectory.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-altdirectory.xml
index 96cfd2d..3105baf 100755
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-altdirectory.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-altdirectory.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <directoryFactory name="DirectoryFactory" class="org.apache.solr.core.AlternateDirectoryTest$TestFSDirectoryFactory"></directoryFactory>
   <indexReaderFactory name="IndexReaderFactory" class="org.apache.solr.core.AlternateDirectoryTest$TestIndexReaderFactory"></indexReaderFactory >
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-basic.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-basic.xml
index b23c612..abfb42d 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-basic.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-basic.xml
@@ -22,6 +22,7 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
   <dataDir>${solr.data.dir:}</dataDir>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-caching.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-caching.xml
index c6f7cbf..0de6f94 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-caching.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-caching.xml
@@ -16,6 +16,7 @@
   -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <query>
     <cache name="lfuCacheDecayFalse"
@@ -35,4 +36,4 @@
            size="10"
            initialSize="9" />
   </query>
-</config>
\ No newline at end of file
+</config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-components-name.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-components-name.xml
index 40a9451..b5501d8 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-components-name.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-components-name.xml
@@ -17,21 +17,12 @@
  limitations under the License.
 -->
 
-<!-- This is a "kitchen sink" config file that tests can use.
-     When writting a new test, feel free to add *new* items (plugins,
-     config options, etc...) as long as they don't break any existing
-     tests.  if you need to test something esoteric please add a new
-     "solrconfig-your-esoteric-purpose.xml" config file.
-
-     Note in particular that this test is used by MinimalSchemaTest so
-     Anything added to this file needs to work correctly even if there
-     is now uniqueKey or defaultSearch Field.
-  -->
-
 <config>
 
   <jmx />
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <!-- Used to specify an alternate directory to hold all index data.
        It defaults to "index" if not present, and should probably
        not be changed if replication is in use. -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-defaults.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-defaults.xml
new file mode 100644
index 0000000..fe39eef
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-defaults.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- 
+
+ an "empty" solrconfig for testing default behavior
+
+ DO NOT ADD THINGS TO THIS CONFIG! 
+
+ NOTE: Most tests should not use this config unless they *explicitly* want
+ to test default behavior.  Most tests should use either...
+
+     solrconfig-minimal.xml
+     solrconfig-basic.xml
+     solrconfig.xml
+
+ ...in order to get better randomization of various config options.
+
+-->
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion><!--
+
+
+                DO NOT ADD THINGS TO THIS CONFIG! 
+
+
+--></config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy1.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy1.xml
index 09e9c65..80a1136 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy1.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy1.xml
@@ -17,7 +17,6 @@
  limitations under the License.
 -->
 
-
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
@@ -26,6 +25,7 @@
   <requestHandler name="standard" class="solr.StandardRequestHandler"/>
 
   <indexConfig>
+   <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
    <deletionPolicy class="solr.SolrDeletionPolicy">
     <str name="keepOptimizedOnly">true</str>
     <str name="maxCommitsToKeep">3</str>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy2.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy2.xml
index 585a1cd..6710892 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy2.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-delpolicy2.xml
@@ -21,6 +21,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <deletionPolicy class="org.apache.solr.core.FakeDeletionPolicy">
       <str name="var1">value1</str>
 		  <str name="var2">value2</str>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-elevate.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-elevate.xml
index 9d0732f..d218cb9 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-elevate.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-elevate.xml
@@ -32,6 +32,7 @@
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <lockType>single</lockType>
   </indexConfig>
   
@@ -66,10 +67,8 @@
     </listener>
     -->
 
-
   </updateHandler>
 
-
   <query>
     <!-- Maximum number of clauses in a boolean query... can affect
         range or wildcard queries that expand to big boolean
@@ -77,7 +76,6 @@
     -->
     <maxBooleanClauses>1024</maxBooleanClauses>
 
-
     <!-- Cache specification for Filters or DocSets - unordered set of *all* documents
          that match a particular query.
       -->
@@ -108,19 +106,16 @@
     <!-- set maxSize artificially low to exercise both types of sets -->
     <HashDocSet maxSize="3" loadFactor="0.75"/>
 
-
     <!-- boolToFilterOptimizer converts boolean clauses with zero boost
          into cached filters if the number of docs selected by the clause exceeds
          the threshold (represented as a fraction of the total index)
     -->
     <boolTofilterOptimizer enabled="false" cacheSize="32" threshold=".05"/>
 
-
   </query>
 
   <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
 
-
   <!-- test elevation -->
   <searchComponent name="elevate" class="org.apache.solr.handler.component.QueryElevationComponent" >
     <str name="queryFieldType">string</str>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-enableplugin.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-enableplugin.xml
index d385f91..2d563d6 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-enableplugin.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-enableplugin.xml
@@ -28,6 +28,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
@@ -45,7 +46,6 @@
   <requestHandler name="disabled" class="solr.StandardRequestHandler" enable="false"/>
   <requestHandler name="enabled" class="solr.StandardRequestHandler" enable="true"/>
 
-
   <!-- test query parameter defaults -->
   <requestHandler name="lazy" class="solr.StandardRequestHandler" startup="lazy">
   </requestHandler>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-functionquery.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-functionquery.xml
index 12bf3a2..1a1a4ff 100755
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-functionquery.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-functionquery.xml
@@ -20,6 +20,8 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
  
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
@@ -38,5 +40,4 @@
     <float name="nvlFloatValue">0.0</float>
   </valueSourceParser>
 
-
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-highlight.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-highlight.xml
index 9da3245..7d55cc2 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-highlight.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-highlight.xml
@@ -20,6 +20,8 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-indexconfig.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-indexconfig.xml
index 74c8268..834032d 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-indexconfig.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-indexconfig.xml
@@ -22,6 +22,8 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <maxIndexingThreads>123</maxIndexingThreads>
+    <infoStream>true</infoStream>
   </indexConfig>
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-infostream-logging.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-infostream-logging.xml
new file mode 100644
index 0000000..722f5e4
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-infostream-logging.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<config>
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+
+  <indexConfig>
+    <infoStream>true</infoStream>
+  </indexConfig>
+</config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-lazywriter.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-lazywriter.xml
index 1e2939f..0636a1d 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-lazywriter.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-lazywriter.xml
@@ -21,6 +21,7 @@
      DO NOT ADD THINGS TO THIS CONFIG! -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml
index 13da8cf..383f858 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml
@@ -20,6 +20,8 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <schemaFactory class="ManagedIndexSchemaFactory">
     <bool name="mutable">false</bool>
     <str name="managedSchemaResourceName">managed-schema</str>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml
index 7de4b30..996b87a 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1-keepOneBackup.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1-keepOneBackup.xml
index 5347975..313efc7 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1-keepOneBackup.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1-keepOneBackup.xml
@@ -23,6 +23,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1.xml
index 3ceaa73..b451b77 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master1.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master2.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master2.xml
index 0248016..16d2382 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master2.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master2.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master3.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master3.xml
index 3e486db..9a1bafb 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-master3.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-master3.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-defaults.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-defaults.xml
new file mode 100644
index 0000000..9d2a99a
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-defaults.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+
+  <indexConfig>
+    <!-- do not put any merge policy, merge factor 
+         or CFS related settings here 
+    -->
+  </indexConfig>
+
+  <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
+
+</config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-legacy.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-legacy.xml
new file mode 100644
index 0000000..00c77ae
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy-legacy.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+
+  <indexConfig>
+    <mergeFactor>7</mergeFactor>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
+
+  <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
+
+</config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy.xml
index 48490ed..cd37e60 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mergepolicy.xml
@@ -26,7 +26,15 @@
     <mergePolicy class="org.apache.lucene.index.TieredMergePolicy">
       <int name="maxMergeAtOnceExplicit">19</int>
       <int name="segmentsPerTier">9</int>
-      <double name="noCFSRatio">1.0</double>
+      <double name="noCFSRatio">0.1</double>
+
+      <!-- Setter for this was moved from the MergePolicies to IndexWriterConfig
+           in Lucene 4.4, so we should treat it the same as a <useCompoundFile>
+           setting and log a warning (instead of failing because the setter is 
+           gone).
+      -->
+      <bool name="useCompoundFile">${useCompoundFile:false}</bool>
+
     </mergePolicy>
   </indexConfig>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-minimal.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-minimal.xml
index 5fb39bb..78a4eb7 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-minimal.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-minimal.xml
@@ -28,8 +28,7 @@
   <directoryFactory name="DirectoryFactory"
                     class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
 
-  <indexConfig>
-  </indexConfig>
+  <xi:include href="./solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
 
   <jmx/>
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mutable-managed-schema.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mutable-managed-schema.xml
index 8daacbf..21571e1 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-mutable-managed-schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-mutable-managed-schema.xml
@@ -19,6 +19,8 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <schemaFactory class="ManagedIndexSchemaFactory">
     <bool name="mutable">true</bool>
     <str name="managedSchemaResourceName">managed-schema</str>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-nativelock.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-nativelock.xml
index ac0f70d..7b5b6f7 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-nativelock.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-nativelock.xml
@@ -28,6 +28,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <lockType>native</lockType>
   </indexConfig>
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-nocache.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-nocache.xml
index e89f7f9..ee27d0c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-nocache.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-nocache.xml
@@ -20,6 +20,7 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-parsing-update-processor-chains.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-parsing-update-processor-chains.xml
new file mode 100644
index 0000000..3c41f50
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-parsing-update-processor-chains.xml
@@ -0,0 +1,230 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+   Test Config that enumerates many different parsing update processor chain 
+   configurations.
+  -->
+<config>
+  <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
+  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
+
+  <updateRequestProcessorChain name="parse-date">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-date-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-date-explicit-not-in-schema-selector-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <bool name="fieldNameMatchesSchemaField">false</bool>
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-date-explicit-typeclass-selector-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="typeClass">solr.DateField</str>
+      <str name="typeClass">solr.TrieDateField</str>
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-date-non-UTC-defaultTimeZone">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="defaultTimeZone">America/New_York</str>
+      <str name="locale">en_US</str>
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+      <str name="format">yyyy-MM-dd'T'HH:mm:ss.SSS</str>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="US-Pacific-parse-date-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="defaultTimeZone">America/Los_Angeles</str>
+      <arr name="format">
+        <str>MM/dd/yyyy</str>
+      </arr>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-date-UTC-defaultTimeZone-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="defaultTimeZone">UTC</str>
+      <str name="locale">en_US</str>
+      <arr name="format">
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss</str>
+        <str>yyyy-MM-dd'T'HH:mmZ</str>
+        <str>yyyy-MM-dd'T'HH:mm</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd HH:mm:ssZ</str>
+        <str>yyyy-MM-dd HH:mm:ss</str>
+        <str>yyyy-MM-dd HH:mmZ</str>
+        <str>yyyy-MM-dd HH:mm</str>
+        <str>yyyy-MM-dd hh:mm a</str>
+        <str>yyyy-MM-dd hh:mma</str>
+        <str>yyyy-MM-dd</str>
+        <str>EEE MMM dd HH:mm:ss Z yyyy</str>
+        <str>EEE MMM dd HH:mm:ss yyyy Z</str>
+        <str>EEE MMM dd HH:mm:ss yyyy</str>
+        <str>EEE, dd MMM yyyy HH:mm:ss Z</str>
+        <str>EEEE, dd-MMM-yy HH:mm:ss Z</str>
+        <str>EEEE, MMMM dd, yyyy</str>
+        <str>MMMM dd, yyyy</str>
+        <str>MMM. dd, yyyy</str>
+      </arr>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-french-date-UTC-defaultTimeZone-no-run-processor">
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <str name="defaultTimeZone">UTC</str>
+      <str name="locale">fr</str>
+      <str name="format">'le' EEEE dd MMMM yyyy</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-int">
+    <processor class="solr.ParseIntFieldUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-int-no-run-processor">
+    <processor class="solr.ParseIntFieldUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-int-russian-no-run-processor">
+    <processor class="solr.ParseIntFieldUpdateProcessorFactory">
+      <str name="locale">ru_RU</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-long">
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-long-no-run-processor">
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-long-russian-no-run-processor">
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory">
+      <str name="locale">ru_RU</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-float">
+    <processor class="solr.ParseFloatFieldUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-float-no-run-processor">
+    <processor class="solr.ParseFloatFieldUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-float-french-no-run-processor">
+    <processor class="solr.ParseFloatFieldUpdateProcessorFactory">
+      <str name="locale">fr_FR</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-double">
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-double-no-run-processor">
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-double-french-no-run-processor">
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory">
+      <str name="locale">fr_FR</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-boolean">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-boolean-no-run-processor">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-boolean-alternate-values-no-run-processor">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory">
+      <bool name="caseSensitive">false</bool>
+      <arr name="trueValue">
+        <str>true</str>
+        <str>YES</str>
+        <str>on</str>
+      </arr>
+      <arr name="falseValue">
+        <str>false</str>
+        <str>no</str>
+        <str>oFF</str>
+      </arr>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="parse-boolean-alternate-single-values-no-run-processor">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory">
+      <str name="trueValue">yup</str>
+      <str name="falseValue">nope</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="cascading-parsers-no-run-processor">
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseIntFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+    
+    <!-- Disabled Float because it will always claim floating point values and round -->
+    <!-- to fit values in available precision                                        -->
+    <!-- <processor class="solr.ParseFloatFieldUpdateProcessorFactory"/> -->
+    
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <arr name="format">
+        <str>yyyy-MM-dd</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm</str>
+      </arr>
+    </processor>
+  </updateRequestProcessorChain>
+</config>
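
The chains above that omit RunUpdateProcessorFactory exist so a unit test can run a parsing processor and inspect the mutated document before anything reaches the index. A minimal sketch of exercising one such chain, assuming the processAdd/doc/f helpers from Solr's UpdateProcessorTestBase ("flag_b" is a hypothetical field name):

    // Sketch, assuming UpdateProcessorTestBase helpers (processAdd, doc, f).
    SolrInputDocument out = processAdd("parse-boolean-alternate-values-no-run-processor",
        doc(f("id", "1"), f("flag_b", "YES")));
    assertEquals(Boolean.TRUE, out.getFieldValue("flag_b")); // "YES" is a configured trueValue
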
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-phrasesuggest.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-phrasesuggest.xml
index ca1d87d..96b4f7b 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-phrasesuggest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-phrasesuggest.xml
@@ -20,6 +20,7 @@
 <!-- solrconfig.xml for a WFST phrase suggester -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <dataDir>${solr.data.dir:}</dataDir>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-postingshighlight.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-postingshighlight.xml
index 8eb9b6b..c3d9d54 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-postingshighlight.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-postingshighlight.xml
@@ -21,6 +21,7 @@
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
   <dataDir>${solr.data.dir:}</dataDir>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler">
     <lst name="defaults">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject-indexdefault.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject-indexdefault.xml
index 7e862ea..a56a71e 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject-indexdefault.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject-indexdefault.xml
@@ -17,21 +17,6 @@
  limitations under the License.
 -->
 
-<!-- 
-
-     This is a "kitchen sink" config file that tests can use.
-     When writting a new test, feel free to add *new* items (plugins,
-     config options, etc...) as long as they don't break any existing
-     tests.  if you need to test something esoteric please add a new
-     "solrconfig-your-esoteric-purpose.xml" config file.
-
-     Note in particular that this test is used by MinimalSchemaTest so
-     Anything added to this file needs to work correctly even if there
-     is now uniqueKey or defaultSearch Field.
-
-
-  -->
-
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
@@ -41,6 +26,7 @@
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <mergePolicy class="org.apache.lucene.index.LogByteSizeMergePolicy">
       <double name="maxMergeMB">32.0</double>
     </mergePolicy>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject.xml
index 8669ea6..0ba21a5 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-propinject.xml
@@ -17,22 +17,6 @@
  limitations under the License.
 -->
 
-<!-- 
-
-
-     This is a "kitchen sink" config file that tests can use.
-     When writting a new test, feel free to add *new* items (plugins,
-     config options, etc...) as long as they don't break any existing
-     tests.  if you need to test something esoteric please add a new
-     "solrconfig-your-esoteric-purpose.xml" config file.
-
-     Note in particular that this test is used by MinimalSchemaTest so 
-     Anything added to this file needs to work correctly even if there
-     is now uniqueKey or defaultSearch Field.
-
-
-  -->
-
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
@@ -43,6 +27,7 @@
 
  
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <mergePolicy class="org.apache.lucene.index.LogByteSizeMergePolicy">
       <double name="maxMergeMB">64.0</double>
     </mergePolicy>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender-noquery.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender-noquery.xml
index b123db9..af6cc75 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender-noquery.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender-noquery.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
     <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
@@ -49,7 +50,6 @@
     <listener event="newSearcher"
               class="org.apache.solr.core.MockEventListener" />
 
-
     <!-- a firstSearcher event is fired whenever a new searcher is being
          prepared but there is no current registered searcher to handle
          requests or to gain prewarming data from. -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender.xml
index a158d8d6..12252c0 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-querysender.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
     <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
@@ -47,7 +48,6 @@
     <listener event="newSearcher"
               class="org.apache.solr.core.MockEventListener" />
 
-
     <!-- a firstSearcher event is fired whenever a new searcher is being
          prepared but there is no current registered searcher to handle
          requests or to gain prewarming data from. -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-repeater.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-repeater.xml
index f5c900e..fb79427 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-repeater.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-repeater.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
@@ -54,7 +55,6 @@
     </lst>
   </requestHandler>
 
-
   <!-- enable streaming for testing... -->
   <requestDispatcher handleSelect="true">
     <requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-response-log-component.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-response-log-component.xml
index cac524d..859883d 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-response-log-component.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-response-log-component.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard"
      class="solr.StandardRequestHandler"></requestHandler>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
index bf0d93c..43fbc28 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-script-updateprocessor.xml
@@ -23,6 +23,7 @@
   -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
@@ -34,7 +35,6 @@
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 
-
   <updateRequestProcessorChain name="run-no-scripts">
     <!-- for bypassing all scripts -->
     <processor class="solr.RunUpdateProcessorFactory" />
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-simplelock.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-simplelock.xml
index 1760267..7981b84 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-simplelock.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-simplelock.xml
@@ -28,6 +28,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <lockType>simple</lockType>
   </indexConfig>
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave.xml
index b00206f..1ff32ae 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
@@ -52,7 +53,6 @@
      </lst>
   </requestHandler>
 
-
   <!-- enable streaming for testing... -->
   <requestDispatcher handleSelect="true">
     <requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave1.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave1.xml
index 4eecf11..6e907a4 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave1.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-slave1.xml
@@ -24,6 +24,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
@@ -48,7 +49,6 @@
 
   </requestHandler>
 
-
   <!-- enable streaming for testing... -->
   <requestDispatcher handleSelect="true">
     <requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-solcoreproperties.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-solcoreproperties.xml
index db9968c..722a392 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-solcoreproperties.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-solcoreproperties.xml
@@ -28,6 +28,7 @@
 
   <indexConfig>
     <lockType>single</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
   <updateHandler class="solr.DirectUpdateHandler2">
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellcheckcomponent.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellcheckcomponent.xml
index 8a01c17..9092a58 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellcheckcomponent.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellcheckcomponent.xml
@@ -19,6 +19,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard"
      class="solr.StandardRequestHandler"></requestHandler>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellchecker.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellchecker.xml
index 72fc0f6..e6744cb 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellchecker.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-spellchecker.xml
@@ -17,6 +17,7 @@
 -->
 
 <config>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
@@ -24,7 +25,6 @@
 
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
-
   <updateHandler class="solr.DirectUpdateHandler2">
   </updateHandler>
 
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-termindex.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-termindex.xml
index a5e497a..2afd813 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-termindex.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-termindex.xml
@@ -31,6 +31,7 @@
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <termIndexInterval>256</termIndexInterval>
     <mergePolicy class="org.apache.lucene.index.TieredMergePolicy"/>
     <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-managed-schema.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-managed-schema.xml
index 2da4f23..2ae9b35 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-managed-schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-managed-schema.xml
@@ -35,6 +35,7 @@
 
   <indexConfig>
     <lockType>${solr.lock.type:native}</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
  <!-- an update processor that explicitly excludes distrib to test
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-mutable-managed-schema.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-mutable-managed-schema.xml
index 28c7c13..3c86eb9 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-mutable-managed-schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog-mutable-managed-schema.xml
@@ -35,6 +35,7 @@
 
   <indexConfig>
     <lockType>${solr.lock.type:native}</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
  <!-- an update processor that explicitly excludes distrib to test
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
index dea28e6..2cacb5a 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
@@ -24,13 +24,17 @@
   
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}">
     <!-- used to keep RAM reqs down for HdfsDirectoryFactory -->
+    <bool name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</bool>
     <int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:1024}</int>
+    <str name="solr.hdfs.home">${solr.hdfs.home:}</str>
+    <str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
   </directoryFactory>
   
   <dataDir>${solr.data.dir:}</dataDir>
 
   <indexConfig>
     <lockType>${solr.lock.type:native}</lockType>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
   </indexConfig>
 
  <!-- an update processor that explicitly excludes distrib to test
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-transformers.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-transformers.xml
index e59bad0..ecaaf11 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-transformers.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-transformers.xml
@@ -19,12 +19,12 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-
 <updateRequestProcessorChain name="standard">
   <processor class="solr.CustomUpdateRequestProcessorFactory" enable="false">
    <!-- this processor is not enabled, so it won't be used at all -->
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
index dffeabe..7f52688 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-update-processor-chains.xml
@@ -24,6 +24,7 @@
   -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
@@ -53,7 +54,6 @@
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 
-
   <updateRequestProcessorChain name="trim-all">
     <!-- no specific src field configs, so all fields should get trimmed -->
     <processor class="solr.TrimFieldUpdateProcessorFactory">
@@ -66,7 +66,6 @@
     </processor>
   </updateRequestProcessorChain>
 
-
   <updateRequestProcessorChain name="trim-most">
     <!-- all fields except the exclusions should be trimmed -->
     <processor class="solr.TrimFieldUpdateProcessorFactory">
@@ -241,6 +240,46 @@
     <processor class="solr.IgnoreFieldUpdateProcessorFactory" />
   </updateRequestProcessorChain>
 
+  <updateRequestProcessorChain name="ignore-not-in-schema-explicit-selector">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <bool name="fieldNameMatchesSchemaField">false</bool>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="ignore-in-schema">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <bool name="fieldNameMatchesSchemaField">true</bool>
+    </processor>
+  </updateRequestProcessorChain>
+  
+  <updateRequestProcessorChain name="ignore-not-in-schema-and-foo-name-prefix">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <str name="fieldRegex">foo.*</str>
+      <bool name="fieldNameMatchesSchemaField">false</bool>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="ignore-foo-name-prefix-except-not-schema">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <str name="fieldRegex">foo.*</str>
+      <lst name="exclude">
+        <bool name="fieldNameMatchesSchemaField">false</bool>
+      </lst>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="ignore-not-in-schema-explicit-str-selector">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <str name="fieldNameMatchesSchemaField">false</str>
+    </processor>
+  </updateRequestProcessorChain>
+
+  <updateRequestProcessorChain name="ignore-in-schema-str-selector">
+    <processor class="solr.IgnoreFieldUpdateProcessorFactory">
+      <str name="fieldNameMatchesSchemaField">true</str>
+    </processor>
+  </updateRequestProcessorChain>
+
   <updateRequestProcessorChain name="ignore-some">
     <processor class="solr.IgnoreFieldUpdateProcessorFactory">
       <str name="fieldRegex">.*_raw</str>
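
The new fieldNameMatchesSchemaField selectors distinguish fields by whether their name resolves to a declared schema field. A sketch under the same assumed test helpers ("bogus_undeclared" is hypothetical and presumed absent from the schema):

    SolrInputDocument out = processAdd("ignore-not-in-schema-explicit-selector",
        doc(f("id", "1"), f("bogus_undeclared", "x")));
    assertFalse(out.containsKey("bogus_undeclared")); // not in schema -> ignored
    assertTrue(out.containsKey("id"));                // declared field kept
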
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-warmer.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-warmer.xml
index 3e1a465..69a0f8a 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-warmer.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-warmer.xml
@@ -26,6 +26,7 @@
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <mergedSegmentWarmer class="org.apache.lucene.index.SimpleMergedSegmentWarmer"/>
   </indexConfig>
 </config>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-xinclude.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-xinclude.xml
index 4162e4f..230a1eb 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-xinclude.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-xinclude.xml
@@ -19,6 +19,8 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <xi:include href="foobar-missing.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
     <xi:fallback>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
new file mode 100644
index 0000000..ce21fdf
--- /dev/null
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- 
+
+A solrconfig.xml snippet containing indexConfig settings for randomized testing.
+
+-->
+<indexConfig>
+  <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+</indexConfig>
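
Configs that xi:include this snippet pick up useCompoundFile from a system property, which lets the test framework randomize compound-file usage per run. A sketch of the pattern, assuming a SolrTestCaseJ4-style test:

    // Randomize the property the snippet reads before the core loads.
    System.setProperty("useCompoundFile", Boolean.toString(random().nextBoolean()));
    initCore("solrconfig.xml", "schema.xml");
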
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml
index 88f6559..991594c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml
@@ -53,7 +53,7 @@
     <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
     <writeLockTimeout>1000</writeLockTimeout>
     <mergeFactor>8</mergeFactor>
-
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <!-- for better multi-segment testing, we are using slower
     indexing properties of maxBufferedDocs=10 and LogDocMergePolicy.
     -->
@@ -94,7 +94,7 @@
     </listener>
     -->
     
-    <updateLog enable="${enable.update.log:false}">
+    <updateLog enable="${enable.update.log:true}">
   	  <str name="dir">${solr.ulog.dir:}</str>
     </updateLog> 
     
@@ -104,7 +104,6 @@
 
   </updateHandler>
 
-
   <query>
     <!-- Maximum number of clauses in a boolean query... can affect
         range or wildcard queries that expand to big boolean
@@ -112,7 +111,6 @@
     -->
     <maxBooleanClauses>1024</maxBooleanClauses>
 
-
     <!-- Cache specification for Filters or DocSets - unordered set of *all* documents
          that match a particular query.
       -->
@@ -149,7 +147,6 @@
       />
     -->
 
-
     <!--
     <useFilterForSortedQuery>true</useFilterForSortedQuery>
     -->
@@ -159,14 +156,12 @@
     <!-- set maxSize artificially low to exercise both types of sets -->
     <HashDocSet maxSize="3" loadFactor="0.75"/>
 
-
     <!-- boolToFilterOptimizer converts boolean clauses with zero boost
          into cached filters if the number of docs selected by the clause exceeds
          the threshold (represented as a fraction of the total index)
     -->
     <boolTofilterOptimizer enabled="false" cacheSize="32" threshold=".05"/>
 
-
     <!-- a newSearcher event is fired whenever a new searcher is being prepared
          and there is a current searcher handling requests (aka registered). -->
     <!-- QuerySenderListener takes an array of NamedList and executes a
@@ -191,7 +186,6 @@
     </listener>
     -->
 
-
   </query>
   
   <queryResponseWriter name="xml" default="true"
@@ -219,7 +213,6 @@
      </lst>
   </requestHandler>
 
-
   <requestHandler name="dismax" class="solr.SearchHandler" >
     <lst name="defaults">
      <str name="defType">dismax</str>
@@ -265,7 +258,6 @@
 
   <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
 
-
   <searchComponent name="spellcheck" class="org.apache.solr.handler.component.SpellCheckComponent">
     <!-- This is slightly different from the field value so we can test dealing with token offset changes -->
     <str name="queryAnalyzerFieldType">lowerpunctfilt</str>
@@ -471,7 +463,6 @@
   </highlighting>
   </searchComponent>
 
-
   <!-- enable streaming for testing... -->
   <requestDispatcher handleSelect="true" >
     <requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048" />
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig_codec.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig_codec.xml
index 738bb33..c5cc04c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig_codec.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig_codec.xml
@@ -18,6 +18,7 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
   <requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
   <codecFactory class="solr.SchemaCodecFactory"/>
diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig_perf.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig_perf.xml
index 0c5e205..172fc95 100755
--- a/solr/core/src/test-files/solr/collection1/conf/solrconfig_perf.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig_perf.xml
@@ -23,6 +23,8 @@
        If replication is in use, this should match the replication configuration. -->
   <dataDir>${solr.data.dir:}</dataDir>
 
+  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
   <!--  The DirectoryFactory to use for indexes.
         solr.StandardDirectoryFactory, the default, is filesystem based.
         solr.RAMDirectoryFactory is memory based and not persistent. -->
@@ -66,7 +68,6 @@
     </httpCaching>
   </requestDispatcher>
 
-
   <requestHandler name="standard" class="solr.SearchHandler" default="true">
   </requestHandler>
 
diff --git a/solr/core/src/test-files/solr/crazy-path-to-config.xml b/solr/core/src/test-files/solr/crazy-path-to-config.xml
index 43baf9d..b4bd5e5 100644
--- a/solr/core/src/test-files/solr/crazy-path-to-config.xml
+++ b/solr/core/src/test-files/solr/crazy-path-to-config.xml
@@ -25,6 +25,7 @@
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
 
   <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
     <lockType>single</lockType>
   </indexConfig>
 
@@ -32,7 +33,6 @@
     <commitIntervalLowerBound>0</commitIntervalLowerBound>
   </updateHandler>
 
-
   <query>
     <maxBooleanClauses>1024</maxBooleanClauses>
     <useFilterForSortedQuery>true</useFilterForSortedQuery>
@@ -54,12 +54,9 @@
   <queryResponseWriter name="useless" class="org.apache.solr.OutputWriterTest$UselessOutputWriter" startup="lazy"/>
   <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter"/>
 
-
   <admin>
     <defaultQuery>solr</defaultQuery>
     <gettableFiles>solrconfig.xml schema.xml</gettableFiles>
   </admin>
 
-
-
 </config>
diff --git a/solr/core/src/test-files/solr/solr-no-core.xml b/solr/core/src/test-files/solr/solr-no-core.xml
index 3dbbe5b..476b5bc 100644
--- a/solr/core/src/test-files/solr/solr-no-core.xml
+++ b/solr/core/src/test-files/solr/solr-no-core.xml
@@ -25,6 +25,7 @@
     <str name="hostContext">${hostContext:solr}</str>
     <int name="hostPort">${hostPort:8983}</int>
     <int name="zkClientTimeout">${solr.zkclienttimeout:30000}</int>
+    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
     <int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:15000}</int>
     <int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:120000}</int>
   </solrcloud>
@@ -35,4 +36,4 @@
     <int name="connTimeout">${connTimeout:15000}</int>
   </shardHandlerFactory>
 
-</solr>
\ No newline at end of file
+</solr>
diff --git a/solr/core/src/test-files/solr/solr.xml b/solr/core/src/test-files/solr/solr.xml
index d962918..a25e880 100644
--- a/solr/core/src/test-files/solr/solr.xml
+++ b/solr/core/src/test-files/solr/solr.xml
@@ -30,6 +30,7 @@
   -->
   <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" 
          hostContext="${hostContext:solr}" zkClientTimeout="${solr.zkclienttimeout:30000}" numShards="${numShards:3}" shareSchema="${shareSchema:false}" 
+         genericCoreNodeNames="${genericCoreNodeNames:true}"
          distribUpdateConnTimeout="${distribUpdateConnTimeout:15000}" distribUpdateSoTimeout="${distribUpdateSoTimeout:120000}">
     <core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"
           coreNodeName="${coreNodeName:}"/>
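
Both test descriptors now default genericCoreNodeNames to true, giving stable names (core_node1, core_node2, ...) instead of names derived from host and port. Tests that still depend on the legacy names can override the property before the container loads, as ClusterStateUpdateTest does below:

    // Opt back into legacy host-derived core node names for a single test class.
    System.setProperty("genericCoreNodeNames", "false");
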
diff --git a/solr/core/src/test/org/apache/solr/SolrTestCaseJ4Test.java b/solr/core/src/test/org/apache/solr/SolrTestCaseJ4Test.java
index 60f8abf..d713547 100644
--- a/solr/core/src/test/org/apache/solr/SolrTestCaseJ4Test.java
+++ b/solr/core/src/test/org/apache/solr/SolrTestCaseJ4Test.java
@@ -42,6 +42,7 @@
     String top = SolrTestCaseJ4.TEST_HOME() + "/collection1/conf";
     FileUtils.copyFile(new File(top, "schema-tiny.xml"), new File(subHome, "schema-tiny.xml"));
     FileUtils.copyFile(new File(top, "solrconfig-minimal.xml"), new File(subHome, "solrconfig-minimal.xml"));
+    FileUtils.copyFile(new File(top, "solrconfig.snippet.randomindexconfig.xml"), new File(subHome, "solrconfig.snippet.randomindexconfig.xml"));
 
     FileUtils.copyDirectory(new File(tmpSolrHome, "core0"), new File(tmpSolrHome, "core1"));
 
diff --git a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
index afac956..bb8de07 100755
--- a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java
@@ -123,6 +123,7 @@
     handle.clear();
     handle.put("QTime", SKIPVAL);
     handle.put("timestamp", SKIPVAL);
+    handle.put("_version_", SKIPVAL); // not a cloud test, but may use updateLog
 
     // random value sort
     for (String f : fieldNames) {
@@ -350,7 +351,7 @@
         ChaosMonkey.stop(downJetty);
         downJettys.add(downJetty);
       }
-      
+
       queryPartialResults(upShards, upClients, 
           "q","*:*",
           "facet","true", 
@@ -358,7 +359,27 @@
           "facet.limit",5,
           ShardParams.SHARDS_INFO,"true",
           ShardParams.SHARDS_TOLERANT,"true");
-      
+
+      queryPartialResults(upShards, upClients,
+          "q", "*:*",
+          "facet", "true",
+          "facet.query", i1 + ":[1 TO 50]",
+          ShardParams.SHARDS_INFO, "true",
+          ShardParams.SHARDS_TOLERANT, "true");
+
+      // test group query
+      queryPartialResults(upShards, upClients,
+          "q", "*:*",
+          "rows", 100,
+          "fl", "id," + i1,
+          "group", "true",
+          "group.query", t1 + ":kings OR " + t1 + ":eggs",
+          "group.limit", 10,
+          "sort", i1 + " asc, id asc",
+          CommonParams.TIME_ALLOWED, 1,
+          ShardParams.SHARDS_INFO, "true",
+          ShardParams.SHARDS_TOLERANT, "true");
+
       // restart the jettys
       for (JettySolrRunner downJetty : downJettys) {
         downJetty.start();
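
The added group query deliberately pairs CommonParams.TIME_ALLOWED=1 with shards.tolerant=true:

    // With a 1 ms time allowance every shard is expected to time out, so this
    // exercises assembling a grouped response from partial, tolerant results
    // while some jettys are down.
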
diff --git a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
index dab377a..2df2c14 100644
--- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
+++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java
@@ -54,6 +54,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/TestJoin.java b/solr/core/src/test/org/apache/solr/TestJoin.java
index aeb1b14..03be32c 100644
--- a/solr/core/src/test/org/apache/solr/TestJoin.java
+++ b/solr/core/src/test/org/apache/solr/TestJoin.java
@@ -37,6 +37,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
index 4e2e036..8009d7b 100644
--- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
+++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java
@@ -37,6 +37,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index 2b19193..bc37e9f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -152,7 +152,7 @@
     createAlias("testalias", "collection2,collection1");
     
     // search with new cloud client
-    CloudSolrServer cloudSolrServer = new CloudSolrServer(zkServer.getZkAddress());
+    CloudSolrServer cloudSolrServer = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
     query = new SolrQuery("*:*");
     query.set("collection", "testalias");
     res = cloudSolrServer.query(query);
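
The extra constructor argument randomizes CloudSolrServer's updatesToLeaders flag, so runs alternate between the two routing modes. Spelled out (zkAddress stands in for zkServer.getZkAddress()):

    CloudSolrServer direct  = new CloudSolrServer(zkAddress, true);  // send updates to shard leaders
    CloudSolrServer relayed = new CloudSolrServer(zkAddress, false); // any node may receive and forward
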
diff --git a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
new file mode 100644
index 0000000..298f515
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
@@ -0,0 +1,95 @@
+package org.apache.solr.cloud;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.ImplicitDocRouter;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class AssignTest extends SolrTestCaseJ4 {
+  protected static Logger log = LoggerFactory.getLogger(AssignTest.class);
+
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+
+  }
+  
+  @Override
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  @Test
+  public void testAssignNode() throws Exception {
+    String cname = "collection1";
+    
+    Map<String,DocCollection> collectionStates = new HashMap<String,DocCollection>();
+    
+    Map<String,Slice> slices = new HashMap<String,Slice>();
+    
+    Map<String,Replica> replicas = new HashMap<String,Replica>();
+    
+    ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state", 
+        ZkStateReader.STATE_PROP, "ACTIVE", 
+        ZkStateReader.BASE_URL_PROP, "0.0.0.0", 
+        ZkStateReader.CORE_NAME_PROP, "core1",
+        ZkStateReader.ROLES_PROP, null,
+        ZkStateReader.NODE_NAME_PROP, "0_0_0_0",
+        ZkStateReader.SHARD_ID_PROP, "shard1",
+        ZkStateReader.COLLECTION_PROP, cname,
+        ZkStateReader.NUM_SHARDS_PROP, "1",
+        ZkStateReader.CORE_NODE_NAME_PROP, "core_node1");
+    Replica replica = new Replica("core_node1" , m.getProperties());
+    replicas.put("core_node1", replica);
+    
+    Slice slice = new Slice("slice1", replicas , new HashMap<String,Object>(0));
+    slices.put("slice1", slice);
+    
+    DocRouter router = new ImplicitDocRouter();
+    DocCollection docCollection = new DocCollection(cname, slices, new HashMap<String,Object>(0), router);
+
+    collectionStates.put(cname, docCollection);
+    
+    Set<String> liveNodes = new HashSet<String>();
+    ClusterState state = new ClusterState(liveNodes, collectionStates);
+    String nodeName = Assign.assignNode("collection1", state);
+    
+    assertEquals("core_node2", nodeName);
+  }
+  
+}
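
The assertion pins down the naming contract exercised here: given a cluster state whose only replica is core_node1, Assign.assignNode is expected to continue the numeric sequence.

    // existing replica: core_node1  ->  next assigned name: core_node2
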
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
index c831f2e..d958647 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
@@ -46,9 +46,7 @@
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.ReplicationHandler;
-import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.util.AbstractSolrTestCase;
 import org.junit.BeforeClass;
 
@@ -176,8 +174,8 @@
       createCmd.setCoreName(ONE_NODE_COLLECTION + "core");
       createCmd.setCollection(ONE_NODE_COLLECTION);
       createCmd.setNumShards(1);
-      createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
-          + ONE_NODE_COLLECTION);
+      createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+          + ONE_NODE_COLLECTION));
       server.request(createCmd);
     } catch (Exception e) {
       e.printStackTrace();
@@ -331,7 +329,7 @@
       ureq.process(cloudClient);
     } catch(SolrServerException e){
       // try again
-      Thread.sleep(500);
+      Thread.sleep(3500);
       ureq.process(cloudClient);
     }
     
@@ -415,14 +413,16 @@
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("qt", "/replication");
     params.set("command", "backup");
+    File location = new File(TEMP_DIR, BasicDistributedZk2Test.class.getName() + "-backupdir-" + System.currentTimeMillis());
+    params.set("location", location.getAbsolutePath());
 
     QueryRequest request = new QueryRequest(params);
     NamedList<Object> results = client.request(request );
     
-    checkForBackupSuccess(client);
+    checkForBackupSuccess(client, location);
     
   }
-  private void checkForBackupSuccess(final HttpSolrServer client)
+  private void checkForBackupSuccess(final HttpSolrServer client, File location)
       throws InterruptedException, IOException {
     class CheckStatus extends Thread {
       volatile String fail = null;
@@ -461,16 +461,6 @@
         
       };
     }
-    ;
-    SolrCore core = ((SolrDispatchFilter) shardToJetty.get(SHARD2).get(0).jetty
-        .getDispatchFilter().getFilter()).getCores().getCore("collection1");
-    String ddir;
-    try {
-      ddir = core.getDataDir(); 
-    } finally {
-      core.close();
-    }
-    File dataDir = new File(ddir);
     
     int waitCnt = 0;
     CheckStatus checkStatus = new CheckStatus();
@@ -482,14 +472,14 @@
       if (checkStatus.success) {
         break;
       }
-      Thread.sleep(200);
-      if (waitCnt == 20) {
+      Thread.sleep(500);
+      if (waitCnt == 90) {
         fail("Backup success not detected:" + checkStatus.response);
       }
       waitCnt++;
     }
     
-    File[] files = dataDir.listFiles(new FilenameFilter() {
+    File[] files = location.listFiles(new FilenameFilter() {
       
       @Override
       public boolean accept(File dir, String name) {
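
The backup check now inspects an explicitly passed location instead of digging the data dir out of a live SolrCore, which removes the core-leak risk and works no matter which node serves the core. The params above amount to a plain replication request (host, core name, and timestamp are illustrative):

    // http://127.0.0.1:8983/solr/collection1/replication?command=backup
    //        &location=/tmp/BasicDistributedZk2Test-backupdir-<timestamp>
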
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 76c6176..dcb1ddb 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -160,7 +160,26 @@
     
     waitForRecoveriesToFinish(false);
     
+    handle.clear();
+    handle.put("QTime", SKIPVAL);
+    handle.put("timestamp", SKIPVAL);
+
     del("*:*");
+    queryAndCompareShards(params("q", "*:*", "distrib", "false", "sanity_check", "is_empty"));
+
+    // ask every individual replica of every shard to update+commit the same doc id
+    // with an incrementing counter on each update+commit
+    int foo_i_counter = 0;
+    for (SolrServer server : clients) {
+      foo_i_counter++;
+      indexDoc(server, params("commit", "true"), // SOLR-4923
+               sdoc(id,1, i1,100, tlong,100, "foo_i", foo_i_counter));
+      // after every update+commit, check all the shards consistency
+      queryAndCompareShards(params("q", "id:1", "distrib", "false", 
+                                   "sanity_check", "non_distrib_id_1_lookup"));
+      queryAndCompareShards(params("q", "id:1", 
+                                   "sanity_check", "distrib_id_1_lookup"));
+    }
 
     indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men"
             ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d);
@@ -195,10 +214,10 @@
     }
 
     commit();
-
-    handle.clear();
-    handle.put("QTime", SKIPVAL);
-    handle.put("timestamp", SKIPVAL);
+    queryAndCompareShards(params("q", "*:*", 
+                                 "sort", "id desc",
+                                 "distrib", "false", 
+                                 "sanity_check", "is_empty"));
 
     // random value sort
     for (String f : fieldNames) {
@@ -519,11 +538,13 @@
           Create createCmd = new Create();
           createCmd.setCoreName(collection + freezeI);
           createCmd.setCollection(collection);
-          String core3dataDir = dataDir.getAbsolutePath() + File.separator
-              + System.currentTimeMillis() + collection + "_3n" + freezeI;
-          createCmd.setDataDir(core3dataDir);
+
           createCmd.setNumShards(numShards);
           try {
+            String core3dataDir = dataDir.getAbsolutePath() + File.separator
+                + System.currentTimeMillis() + collection + "_3n" + freezeI;
+            createCmd.setDataDir(getDataDir(core3dataDir));
+
             server.request(createCmd);
           } catch (SolrServerException e) {
             throw new RuntimeException(e);
@@ -555,11 +576,13 @@
     params.set(OverseerCollectionProcessor.MAX_SHARDS_PER_NODE, maxShardsPerNode);
     if (createNodeSetStr != null) params.set(OverseerCollectionProcessor.CREATE_NODE_SET, createNodeSetStr);
 
-    int clientIndex = random().nextInt(2);
+    int clientIndex = clients.size() > 1 ? random().nextInt(2) : 0;
     List<Integer> list = new ArrayList<Integer>();
     list.add(numShards);
     list.add(numReplicas);
-    collectionInfos.put(collectionName, list);
+    if (collectionInfos != null) {
+      collectionInfos.put(collectionName, list);
+    }
     params.set("name", collectionName);
     SolrRequest request = new QueryRequest(params);
     request.setPath("/admin/collections");
@@ -913,8 +936,8 @@
           if (shardId == null) {
             createCmd.setNumShards(2);
           }
-          createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
-              + collection + num);
+          createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+              + collection + num));
           if (shardId != null) {
             createCmd.setShardId(shardId);
           }
@@ -1037,8 +1060,9 @@
             server.setSoTimeout(30000);
             Create createCmd = new Create();
             createCmd.setCoreName(collection);
-            createCmd.setDataDir(dataDir.getAbsolutePath() + File.separator
-                + collection + frozeUnique);
+            createCmd.setDataDir(getDataDir(dataDir.getAbsolutePath() + File.separator
+                + collection + frozeUnique));
+
             server.request(createCmd);
 
           } catch (Exception e) {
@@ -1079,7 +1103,7 @@
     if (commondCloudSolrServer == null) {
       synchronized(this) {
         try {
-          commondCloudSolrServer = new CloudSolrServer(zkServer.getZkAddress());
+          commondCloudSolrServer = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
           commondCloudSolrServer.setDefaultCollection(DEFAULT_COLLECTION);
           commondCloudSolrServer.getLbServer().setConnectionTimeout(15000);
           commondCloudSolrServer.getLbServer().setSoTimeout(30000);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
index fbb4ff8..8558bc5 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
@@ -42,7 +42,6 @@
 import org.slf4j.LoggerFactory;
 
 @Slow
-@BadApple
 public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase {
   public static Logger log = LoggerFactory.getLogger(ChaosMonkeyNothingIsSafeTest.class);
   
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 1db6ddd..ed34500 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -31,6 +31,7 @@
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -41,6 +42,7 @@
  * Test split phase that occurs when a Collection API split call is made.
  */
 @Slow
+@Ignore("SOLR-4944")
 public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
 
   static final int TIMEOUT = 10000;
@@ -78,7 +80,7 @@
     try {
       del("*:*");
       for (int id = 0; id < 100; id++) {
-        indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
+        indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
       }
       commit();
 
@@ -88,7 +90,7 @@
           int max = atLeast(401);
           for (int id = 101; id < max; id++) {
             try {
-              indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
+              indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
               Thread.sleep(atLeast(25));
             } catch (Exception e) {
               log.error("Exception while adding doc", e);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
index 02530a1..7bc0789 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
@@ -17,11 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.ClusterState;
@@ -31,7 +26,6 @@
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreContainer.Initializer;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrCore;
 import org.apache.zookeeper.CreateMode;
@@ -41,6 +35,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 @Slow
 public class ClusterStateUpdateTest extends SolrTestCaseJ4  {
   protected static Logger log = LoggerFactory
@@ -65,17 +64,17 @@
   private File dataDir3;
   
   private File dataDir4;
-
-  private Initializer init2;
   
   @BeforeClass
   public static void beforeClass() {
     System.setProperty("solrcloud.skip.autorecovery", "true");
+    System.setProperty("genericCoreNodeNames", "false");
   }
   
   @AfterClass
   public static void afterClass() throws InterruptedException {
     System.clearProperty("solrcloud.skip.autorecovery");
+    System.clearProperty("genericCoreNodeNames");
   }
 
   @Override
@@ -111,22 +110,21 @@
     
     System.setProperty("solr.solr.home", TEST_HOME());
     System.setProperty("hostPort", "1661");
-    CoreContainer.Initializer init1 = new CoreContainer.Initializer();
     System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir1.getAbsolutePath());
-    container1 = init1.initialize();
+    container1 = new CoreContainer();
+    container1.load();
     System.clearProperty("hostPort");
     
     System.setProperty("hostPort", "1662");
-    init2 = new CoreContainer.Initializer();
     System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
-    container2 = init2.initialize();
+    container2 = new CoreContainer();
+    container2.load();
     System.clearProperty("hostPort");
     
     System.setProperty("hostPort", "1663");
-    CoreContainer.Initializer init3 = new CoreContainer.Initializer();
-   
     System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir3.getAbsolutePath());
-    container3 = init3.initialize();
+    container3 = new CoreContainer();
+    container3.load();
     System.clearProperty("hostPort");
     System.clearProperty("solr.solr.home");
     
@@ -157,6 +155,10 @@
     
     dcore.setDataDir(dataDir4.getAbsolutePath());
 
+    if (container1.getZkController() != null) {
+      container1.preRegisterInZk(dcore);
+    }
+    
     SolrCore core = container1.create(dcore);
     
     container1.register(core, false);
@@ -220,7 +222,11 @@
         .disconnect();
     container2.shutdown();
 
-    container2 = init2.initialize();
+    System.setProperty("hostPort", "1662");
+    System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
+    container2 = new CoreContainer();
+    container2.load();
+    System.clearProperty("hostPort");
     
     // pause for watch to trigger
     for(int i = 0; i < 200; i++) {
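
This is the bootstrap idiom the patch applies across the cloud tests: CoreContainer.Initializer is gone, and containers are constructed and loaded directly, with host/port and data-dir knobs supplied as system properties beforehand.

    // Replaces the removed CoreContainer.Initializer pattern:
    System.setProperty("hostPort", "1662");
    System.setProperty("solr.data.dir", dataDir2.getAbsolutePath());
    CoreContainer container = new CoreContainer();
    container.load();
    System.clearProperty("hostPort");
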
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
index 4658b1d..acaa0c7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
@@ -17,6 +17,20 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.common.cloud.ZkNodeProps;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.core.CoreContainer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.ParserConfigurationException;
 import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
@@ -27,22 +41,6 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreContainer.Initializer;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.xml.sax.SAXException;
-
 @Slow
 public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
   protected static Logger log = LoggerFactory
@@ -140,7 +138,6 @@
     
     System.setProperty("hostPort", Integer.toString(port));
     System.setProperty("shard", shard);
-    Initializer init = new CoreContainer.Initializer();
     System.setProperty("solr.data.dir", data.getAbsolutePath());
     System.setProperty("solr.solr.home", TEST_HOME());
     Set<Integer> ports = shardPorts.get(shard);
@@ -149,7 +146,8 @@
       shardPorts.put(shard, ports);
     }
     ports.add(port);
-    CoreContainer container = init.initialize();
+    CoreContainer container = new CoreContainer();
+    container.load();
     assertTrue("Container " + port + " has no cores!", container.getCores()
         .size() > 0);
     containerMap.put(port, container);
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index 7cc550c..e722f52 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -36,6 +36,7 @@
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
+import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
@@ -98,7 +99,7 @@
       zkClient.close();
     }
     
-    public String publishState(String coreName, String stateName, int numShards)
+    public String publishState(String coreName, String coreNodeName, String stateName, int numShards)
         throws KeeperException, InterruptedException, IOException {
       if (stateName == null) {
         ElectionContext ec = electionContext.remove(coreName);
@@ -108,6 +109,7 @@
         ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "deletecore",
             ZkStateReader.NODE_NAME_PROP, nodeName,
             ZkStateReader.CORE_NAME_PROP, coreName,
+            ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
             ZkStateReader.COLLECTION_PROP, collection);
             DistributedQueue q = Overseer.getInQueue(zkClient);
             q.offer(ZkStateReader.toJSON(m));
@@ -117,6 +119,7 @@
         ZkStateReader.STATE_PROP, stateName,
         ZkStateReader.NODE_NAME_PROP, nodeName,
         ZkStateReader.CORE_NAME_PROP, coreName,
+        ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
         ZkStateReader.COLLECTION_PROP, collection,
         ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
         ZkStateReader.BASE_URL_PROP, "http://" + nodeName
@@ -126,7 +129,8 @@
       }
       
       for (int i = 0; i < 120; i++) {
-        String shardId = getShardId(coreName);
+        String shardId = getShardId("http://" + nodeName
+            + "/solr/", coreName);
         if (shardId != null) {
           try {
             zkClient.makePath("/collections/" + collection + "/leader_elect/"
@@ -136,7 +140,8 @@
               "http://" + nodeName + "/solr/", ZkStateReader.NODE_NAME_PROP,
               nodeName, ZkStateReader.CORE_NAME_PROP, coreName,
               ZkStateReader.SHARD_ID_PROP, shardId,
-              ZkStateReader.COLLECTION_PROP, collection);
+              ZkStateReader.COLLECTION_PROP, collection,
+              ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
           ShardLeaderElectionContextBase ctx = new ShardLeaderElectionContextBase(
               elector, shardId, collection, nodeName + "_" + coreName, props,
               zkStateReader);
@@ -148,13 +153,18 @@
       return null;
     }
     
-    private String getShardId(final String coreName) {
+    private String getShardId(final String baseUrl, final String coreName) {
       Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(
           collection);
       if (slices != null) {
         for (Slice slice : slices.values()) {
-          if (slice.getReplicasMap().containsKey(nodeName + "_" + coreName)) {
-            return slice.getName();
+          for (Replica replica : slice.getReplicas()) {
+            // TODO: for really large clusters, we could 'index' on this
+            String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+            String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+            if (baseUrl.equals(rbaseUrl) && coreName.equals(rcore)) {
+              return slice.getName();
+            }
           }
         }
       }
@@ -202,7 +212,7 @@
       final int numShards=6;
       
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState("core" + (i+1), ZkStateReader.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState("core" + (i+1), "node" + (i+1), ZkStateReader.ACTIVE, 3));
       }
 
       assertEquals(2, reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap().size());
@@ -277,7 +287,7 @@
             final String coreName = "core" + slot;
             
             try {
-              ids[slot]=controllers[slot % nodeCount].publishState(coreName, ZkStateReader.ACTIVE, sliceCount);
+              ids[slot] = controllers[slot % nodeCount].publishState(coreName, "node" + slot, ZkStateReader.ACTIVE, sliceCount);
             } catch (Throwable e) {
               e.printStackTrace();
               fail("register threw exception:" + e.getClass());
@@ -440,7 +450,7 @@
 
       assertEquals(reader.getClusterState().toString(), ZkStateReader.RECOVERING,
           reader.getClusterState().getSlice("collection1", "shard1").getReplicasMap()
-              .get("node1_core1").getStr(ZkStateReader.STATE_PROP));
+              .get("core_node1").getStr(ZkStateReader.STATE_PROP));
 
       //publish node state (active)
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
@@ -471,7 +481,7 @@
     while(maxIterations-->0) {
       Slice slice = reader.getClusterState().getSlice("collection1", "shard1");
       if(slice!=null) {
-        coreState = slice.getReplicasMap().get("node1_core1").getStr(ZkStateReader.STATE_PROP);
+        coreState = slice.getReplicasMap().get("core_node1").getStr(ZkStateReader.STATE_PROP);
         if(coreState.equals(expectedState)) {
           return;
         }
@@ -523,14 +533,14 @@
       overseerClient = electNewOverseer(server.getZkAddress());
 
       Thread.sleep(1000);
-      mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+      mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
 
       waitForCollections(reader, "collection1");
       verifyStatus(reader, ZkStateReader.RECOVERING);
 
       int version = getClusterStateVersion(controllerClient);
       
-      mockController.publishState("core1", ZkStateReader.ACTIVE, 1);
+      mockController.publishState("core1", "core_node1", ZkStateReader.ACTIVE, 1);
       
       while(version == getClusterStateVersion(controllerClient));
 
@@ -539,7 +549,7 @@
       overseerClient.close();
       Thread.sleep(1000); //wait for overseer to get killed
 
-      mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+      mockController.publishState("core1",  "core_node1", ZkStateReader.RECOVERING, 1);
       version = getClusterStateVersion(controllerClient);
       
       overseerClient = electNewOverseer(server.getZkAddress());
@@ -553,7 +563,7 @@
       assertEquals("Shard count does not match", 1, reader.getClusterState()
           .getSlice("collection1", "shard1").getReplicasMap().size());
       version = getClusterStateVersion(controllerClient);
-      mockController.publishState("core1", null,1);
+      mockController.publishState("core1", "core_node1", null,1);
       while(version == getClusterStateVersion(controllerClient));
       Thread.sleep(500);
       assertFalse("collection1 should be gone after publishing the null state", reader.getClusterState().getCollections().contains("collection1"));
@@ -641,16 +651,16 @@
       for (int i = 0; i < atLeast(4); i++) {
         killCounter.incrementAndGet(); //for each round allow 1 kill
         mockController = new MockZKController(server.getZkAddress(), "node1", "collection1");
-        mockController.publishState("core1", "state1",1);
+        mockController.publishState("core1", "node1", "state1",1);
         if(mockController2!=null) {
           mockController2.close();
           mockController2 = null;
         }
-        mockController.publishState("core1", "state2",1);
+        mockController.publishState("core1", "node1","state2",1);
         mockController2 = new MockZKController(server.getZkAddress(), "node2", "collection1");
-        mockController.publishState("core1", "state1",1);
+        mockController.publishState("core1", "node1", "state1",1);
         verifyShardLeader(reader, "collection1", "shard1", "core1");
-        mockController2.publishState("core4", "state2" ,1);
+        mockController2.publishState("core4", "node2", "state2" ,1);
         mockController.close();
         mockController = null;
         verifyShardLeader(reader, "collection1", "shard1", "core4");
@@ -697,7 +707,7 @@
       
       overseerClient = electNewOverseer(server.getZkAddress());
 
-      mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+      mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
 
       waitForCollections(reader, "collection1");
       
@@ -708,7 +718,7 @@
       int version = getClusterStateVersion(controllerClient);
       
       mockController = new MockZKController(server.getZkAddress(), "node1", "collection1");
-      mockController.publishState("core1", ZkStateReader.RECOVERING, 1);
+      mockController.publishState("core1", "core_node1", ZkStateReader.RECOVERING, 1);
 
       while (version == getClusterStateVersion(controllerClient));
       
@@ -718,7 +728,7 @@
       int numFound = 0;
       for (DocCollection collection : state.getCollectionStates().values()) {
         for (Slice slice : collection.getSlices()) {
-          if (slice.getReplicasMap().get("node1_core1") != null) {
+          if (slice.getReplicasMap().get("core_node1") != null) {
             numFound++;
           }
         }
@@ -761,7 +771,7 @@
       
       overseerClient = electNewOverseer(server.getZkAddress());
 
-      mockController.publishState("core1", ZkStateReader.RECOVERING, 12);
+      mockController.publishState("core1", "node1", ZkStateReader.RECOVERING, 12);
 
       waitForCollections(reader, "collection1");
       
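These OverseerTest hunks track a broader rename running through this patch: a replica is now registered under an explicit coreNodeName (e.g. "core_node1") rather than the key derived from "<nodeName>_<coreName>", which is why publishState gains a coreNodeName argument and getShardId matches replicas by base URL plus core name instead of by map key. A condensed sketch of the resulting lookup, using the mock controller above:

    mockController.publishState("core1", "core_node1", ZkStateReader.ACTIVE, 1);
    Replica replica = reader.getClusterState()
        .getSlice("collection1", "shard1")
        .getReplicasMap().get("core_node1"); // keyed by coreNodeName now
    assertEquals(ZkStateReader.ACTIVE, replica.getStr(ZkStateReader.STATE_PROP));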
diff --git a/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java b/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
index 2f7daeb..28e473a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RecoveryZkTest.java
@@ -86,19 +86,25 @@
     indexThread.join();
     indexThread2.join();
     
-    Thread.sleep(500);
+    Thread.sleep(1000);
   
-    waitForThingsToLevelOut(30);
+    waitForThingsToLevelOut(45);
     
     Thread.sleep(2000);
     
     waitForThingsToLevelOut(30);
     
+    Thread.sleep(5000);
+    
     waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
 
     // test that leader and replica have same doc count
     
-    checkShardConsistency("shard1", false, false);
+    String failMessage = checkShardConsistency("shard1", false, false);
+    if (failMessage != null) {
+      fail(failMessage);
+    }
+    
     SolrQuery query = new SolrQuery("*:*");
     query.setParam("distrib", "false");
     long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index a999ce6..ab1ba63 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -17,6 +17,15 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
 import org.apache.http.params.CoreConnectionPNames;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
@@ -28,10 +37,8 @@
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.CompositeIdRouter;
 import org.apache.solr.common.cloud.DocRouter;
 import org.apache.solr.common.cloud.HashBasedRouter;
-import org.apache.solr.common.cloud.PlainIdRouter;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
@@ -43,12 +50,6 @@
 import org.junit.After;
 import org.junit.Before;
 
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 public class ShardSplitTest extends BasicDistributedZkTest {
 
   public static final String SHARD1_0 = SHARD1 + "_0";
@@ -110,35 +111,66 @@
     del("*:*");
     for (int id = 0; id <= 100; id++) {
       String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
-      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id));
+      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id), id);
     }
     commit();
 
     Thread indexThread = new Thread() {
       @Override
       public void run() {
-        int max = atLeast(401);
+        Random random = random();
+        int max = atLeast(random, 401);
+        int sleep = atLeast(random, 25);
+        log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
+        Set<String> deleted = new HashSet<String>();
         for (int id = 101; id < max; id++) {
           try {
-            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
-            Thread.sleep(atLeast(25));
+            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
+            Thread.sleep(sleep);
+            if (usually(random)) {
+              String delId = String.valueOf(random.nextInt(id - 101 + 1) + 101);
+              if (deleted.contains(delId))  continue;
+              try {
+                deleteAndUpdateCount(router, ranges, docCounts, delId);
+                deleted.add(delId);
+              } catch (Exception e) {
+                log.error("Exception while deleting docs", e);
+              }
+            }
           } catch (Exception e) {
-            log.error("Exception while adding doc", e);
+            log.error("Exception while adding docs", e);
           }
         }
       }
     };
     indexThread.start();
 
-    splitShard(SHARD1);
-
-    log.info("Layout after split: \n");
-    printLayout();
-
-    indexThread.join();
+    try {
+      for (int i = 0; i < 3; i++) {
+        try {
+          splitShard(SHARD1);
+          log.info("Layout after split: \n");
+          printLayout();
+          break;
+        } catch (HttpSolrServer.RemoteSolrException e) {
+          if (e.code() != 500) {
+            throw e;
+          }
+          log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
+          if (i == 2) {
+            fail("SPLITSHARD was not successful even after three tries");
+          }
+        }
+      }
+    } finally {
+      try {
+        indexThread.join();
+      } catch (InterruptedException e) {
+        log.error("Indexing thread interrupted", e);
+      }
+    }
 
     commit();
-
     checkDocCountsAndShardStates(docCounts, numReplicas);
 
     // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
@@ -148,24 +180,6 @@
   }
 
   protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException {
-    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
-    query.set("distrib", false);
-
-    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
-    HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
-    QueryResponse response = shard1_0Server.query(query);
-    long shard10Count = response.getResults().getNumFound();
-
-    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
-    HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
-    QueryResponse response2 = shard1_1Server.query(query);
-    long shard11Count = response2.getResults().getNumFound();
-
-    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
-
-    assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
-    assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);
-
     ClusterState clusterState = null;
     Slice slice1_0 = null, slice1_1 = null;
     int i = 0;
@@ -188,6 +202,24 @@
     assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
     assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
     assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
+
+    SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
+    query.set("distrib", false);
+
+    ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
+    HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
+    QueryResponse response = shard1_0Server.query(query);
+    long shard10Count = response.getResults().getNumFound();
+
+    ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
+    HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
+    QueryResponse response2 = shard1_1Server.query(query);
+    long shard11Count = response2.getResults().getNumFound();
+
+    logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
+
+    assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
+    assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);
   }
 
   protected void splitShard(String shardId) throws SolrServerException, IOException {
@@ -208,9 +240,26 @@
     baseServer.request(request);
   }
 
-  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
-    index("id", id);
+  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
+    index("id", id, "n_ti", n);
 
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1) {
+      docCounts[idx]++;
+    }
+  }
+
+  protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
+    controlClient.deleteById(id);
+    cloudClient.deleteById(id);
+
+    int idx = getHashRangeIdx(router, ranges, id);
+    if (idx != -1) {
+      docCounts[idx]--;
+    }
+  }
+
+  private int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) {
     int hash = 0;
     if (router instanceof HashBasedRouter) {
       HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
@@ -219,8 +268,9 @@
     for (int i = 0; i < ranges.size(); i++) {
       DocRouter.Range range = ranges.get(i);
       if (range.includes(hash))
-        docCounts[i]++;
+        return i;
     }
+    return -1;
   }
 
   protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
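The helpers above keep an expected per-hash-range document count in step with the concurrent adds and deletes: each id is hashed through the router, and the counter for whichever range owns it is adjusted, so the final totals can be compared against what each sub-shard actually reports. A condensed sketch, using the test's own router, ranges and docCounts:

    int idx = getHashRangeIdx(router, ranges, "42"); // which range owns id "42"?
    if (idx != -1) {
      docCounts[idx]++;   // after an add; docCounts[idx]-- after a delete
    }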
diff --git a/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
index 1dfb422..67bd7f2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
@@ -28,7 +28,6 @@
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreContainer.Initializer;
 import org.apache.zookeeper.CreateMode;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -62,8 +61,6 @@
 
   private File dataDir3;
 
-  private Initializer init2;
-
   @BeforeClass
   public static void beforeClass() {
     System.setProperty("solrcloud.skip.autorecovery", "true");
@@ -117,20 +114,21 @@
 
     System.setProperty("solr.solr.home", TEST_HOME());
     System.setProperty("hostPort", "1661");
-    CoreContainer.Initializer init1 = new CoreContainer.Initializer();
     System.setProperty("solr.data.dir", SliceStateUpdateTest.this.dataDir1.getAbsolutePath());
-    container1 = init1.initialize();
+    container1 = new CoreContainer();
 
     System.clearProperty("hostPort");
 
     System.setProperty("hostPort", "1662");
-    init2 = new CoreContainer.Initializer();
     System.setProperty("solr.data.dir", SliceStateUpdateTest.this.dataDir2.getAbsolutePath());
-    container2 = init2.initialize();
+    container2 = new CoreContainer();
     System.clearProperty("hostPort");
 
     System.clearProperty("solr.solr.home");
 
+    container1.load();
+    container2.load();
+
     log.info("####SETUP_END " + getTestName());
 
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
index 22948c4..11cfb7c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
@@ -67,7 +67,6 @@
     super.setUp();
     // we expect this time of exception as shards go up and down...
     //ignoreException(".*");
-    useFactory(null);
     System.setProperty("numShards", Integer.toString(sliceCount));
   }
   
@@ -94,7 +93,7 @@
     handle.put("QTime", SKIPVAL);
     handle.put("timestamp", SKIPVAL);
     
-    waitForThingsToLevelOut(15);
+    waitForThingsToLevelOut(30);
 
     del("*:*");
     List<CloudJettyRunner> skipServers = new ArrayList<CloudJettyRunner>();
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestMultiCoreConfBootstrap.java b/solr/core/src/test/org/apache/solr/cloud/TestMultiCoreConfBootstrap.java
index 0d9b467..0e97731 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestMultiCoreConfBootstrap.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestMultiCoreConfBootstrap.java
@@ -17,8 +17,6 @@
 
 package org.apache.solr.cloud;
 
-import java.io.File;
-
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.core.CoreContainer;
@@ -30,6 +28,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+
 public class TestMultiCoreConfBootstrap extends SolrTestCaseJ4 {
   protected static Logger log = LoggerFactory.getLogger(TestMultiCoreConfBootstrap.class);
   protected CoreContainer cores = null;
@@ -99,7 +99,7 @@
   @Test
   public void testMultiCoreConfBootstrap() throws Exception {
     System.setProperty("bootstrap_conf", "true");
-    cores = new CoreContainer(home, new File(home, "solr.xml"));
+    cores = CoreContainer.createAndLoad(home, new File(home, "solr.xml"));
     SolrZkClient zkclient = cores.getZkController().getZkClient();
     // zkclient.printLayoutToStdOut();
     
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestZkChroot.java b/solr/core/src/test/org/apache/solr/cloud/TestZkChroot.java
index b28e3e2..b025d03 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestZkChroot.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestZkChroot.java
@@ -17,8 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZooKeeperException;
@@ -31,6 +29,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+
 public class TestZkChroot extends SolrTestCaseJ4 {
   protected static Logger log = LoggerFactory.getLogger(TestZkChroot.class);
   protected CoreContainer cores = null;
@@ -91,7 +91,7 @@
     SolrZkClient zkClient2 = null;
     
     try {
-      cores = new CoreContainer(home, new File(home, "solr.xml"));
+      cores = CoreContainer.createAndLoad(home, new File(home, "solr.xml"));
       zkClient = cores.getZkController().getZkClient();
       
       assertTrue(zkClient.exists("/clusterstate.json", true));
@@ -122,7 +122,7 @@
           AbstractZkTestCase.TIMEOUT);
       assertFalse("Path '" + chroot + "' should not exist before the test",
           zkClient.exists(chroot, true));
-      cores = new CoreContainer(home, new File(home, "solr.xml"));
+      cores = CoreContainer.createAndLoad(home, new File(home, "solr.xml"));
       fail("There should be a zk exception, as the initial path doesn't exist");
     } catch (ZooKeeperException e) {
       // expected
@@ -150,7 +150,7 @@
           AbstractZkTestCase.TIMEOUT);
       assertFalse("Path '" + chroot + "' should not exist before the test",
           zkClient.exists(chroot, true));
-      cores = new CoreContainer(home, new File(home, "solr.xml"));
+      cores = CoreContainer.createAndLoad(home, new File(home, "solr.xml"));
       assertTrue(
           "solrconfig.xml should have been uploaded to zk to the correct config directory",
           zkClient.exists(chroot + ZkController.CONFIGS_ZKNODE + "/"
@@ -176,7 +176,7 @@
       assertTrue(zkClient.exists(chroot, true));
       assertFalse(zkClient.exists(chroot + "/clusterstate.json", true));
       
-      cores = new CoreContainer(home, new File(home, "solr.xml"));
+      cores = CoreContainer.createAndLoad(home, new File(home, "solr.xml"));
       assertTrue(zkClient.exists(chroot + "/clusterstate.json", true));
     } finally {
       if (cores != null) cores.shutdown();
diff --git a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
index 8b8593e..01e8045 100644
--- a/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/UnloadDistributedZkTest.java
@@ -91,7 +91,7 @@
     createCmd.setCollection(collection);
     String coreDataDir = dataDir.getAbsolutePath() + File.separator
         + System.currentTimeMillis() + collection + "1";
-    createCmd.setDataDir(coreDataDir);
+    createCmd.setDataDir(getDataDir(coreDataDir));
     createCmd.setNumShards(2);
     
     SolrServer client = clients.get(0);
@@ -107,7 +107,7 @@
     createCmd.setCollection(collection);
     coreDataDir = dataDir.getAbsolutePath() + File.separator
         + System.currentTimeMillis() + collection + "2";
-    createCmd.setDataDir(coreDataDir);
+    createCmd.setDataDir(getDataDir(coreDataDir));
     
     server.request(createCmd);
     
@@ -171,7 +171,7 @@
     createCmd.setCollection("unloadcollection");
     createCmd.setNumShards(1);
     String core1DataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_1n";
-    createCmd.setDataDir(core1DataDir);
+    createCmd.setDataDir(getDataDir(core1DataDir));
     server.request(createCmd);
     
     ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
@@ -189,7 +189,7 @@
     createCmd.setCoreName("unloadcollection2");
     createCmd.setCollection("unloadcollection");
     String core2dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_2n";
-    createCmd.setDataDir(core2dataDir);
+    createCmd.setDataDir(getDataDir(core2dataDir));
     server.request(createCmd);
     
     zkStateReader.updateClusterState(true);
@@ -227,7 +227,7 @@
     createCmd.setCoreName("unloadcollection3");
     createCmd.setCollection("unloadcollection");
     String core3dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_3n";
-    createCmd.setDataDir(core3dataDir);
+    createCmd.setDataDir(getDataDir(core3dataDir));
     server.request(createCmd);
     
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@@ -296,7 +296,7 @@
     createCmd.setCoreName("unloadcollection4");
     createCmd.setCollection("unloadcollection");
     String core4dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_4n";
-    createCmd.setDataDir(core4dataDir);
+    createCmd.setDataDir(getDataDir(core4dataDir));
     server.request(createCmd);
     
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@@ -334,7 +334,7 @@
     createCmd = new Create();
     createCmd.setCoreName(leaderProps.getCoreName());
     createCmd.setCollection("unloadcollection");
-    createCmd.setDataDir(core1DataDir);
+    createCmd.setDataDir(getDataDir(core1DataDir));
     server.request(createCmd);
 
     waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
index 22a201f..d51e6ac 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
@@ -138,6 +138,19 @@
 
     assertTrue(zkClient.exists("/path/mynewpath", true));
   }
+
+  @Test
+  public void testPut() throws Exception {
+    // test the put command
+    String data = "my data";
+    String[] args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
+        "put", "/data.txt", data};
+    ZkCLI.main(args);
+
+    zkClient.getData("/data.txt", null, null, true);
+
+    assertArrayEquals(zkClient.getData("/data.txt", null, null, true), data.getBytes("UTF-8"));
+  }
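For reference, the new test drives ZkCLI's "put" command exactly as the command-line wrapper would; an equivalent programmatic invocation (the ZooKeeper address here is illustrative):

    ZkCLI.main(new String[] {
        "-zkhost", "localhost:9983",
        "-cmd", "put", "/data.txt", "my data"});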
   
   @Test
   public void testList() throws Exception {
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
index dc930ea..3a31ea4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
@@ -17,11 +17,6 @@
  * the License.
  */
 
-import java.io.File;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.cloud.SolrZkClient;
@@ -34,6 +29,11 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 @Slow
 public class ZkControllerTest extends SolrTestCaseJ4 {
 
@@ -163,7 +163,7 @@
       cc = getCoreContainer();
       
       ZkController zkController = new ZkController(cc, server.getZkAddress(), TIMEOUT, 10000,
-          "127.0.0.1", "8983", "solr", "0", 10000, 10000, new CurrentCoreDescriptorProvider() {
+          "127.0.0.1", "8983", "solr", "0", true, 10000, 10000, new CurrentCoreDescriptorProvider() {
             
             @Override
             public List<CoreDescriptor> getCurrentDescriptors() {
@@ -203,7 +203,7 @@
       cc = getCoreContainer();
       
       zkController = new ZkController(cc, server.getZkAddress(),
-          TIMEOUT, 10000, "127.0.0.1", "8983", "solr", "0", 10000, 10000, new CurrentCoreDescriptorProvider() {
+          TIMEOUT, 10000, "127.0.0.1", "8983", "solr", "0", true, 10000, 10000, new CurrentCoreDescriptorProvider() {
             
             @Override
             public List<CoreDescriptor> getCurrentDescriptors() {
@@ -240,12 +240,8 @@
   }
 
   private CoreContainer getCoreContainer() {
-    CoreContainer cc = new CoreContainer(TEMP_DIR.getAbsolutePath()) {
-      {
-        initShardHandler();
-      }
-    };
-    
+    CoreContainer cc = new CoreContainer(TEMP_DIR.getAbsolutePath());
+    cc.load();
     return cc;
   }
 
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/BasicHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/BasicHdfsTest.java
new file mode 100644
index 0000000..dd14e61
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/BasicHdfsTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.cloud.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class BasicHdfsTest extends BasicDistributedZkTest {
+
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+    System.setProperty("solr.hdfs.home", dfsCluster.getURI().toString() + "/solr");
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    System.clearProperty("solr.hdfs.home");
+    dfsCluster = null;
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+  
+  public BasicHdfsTest() {
+    super();
+    sliceCount = 1;
+    shardCount = 1;
+  }
+  
+  protected String getSolrXml() {
+    return "solr-no-core.xml";
+  }
+  
+  @Override
+  public void doTest() throws Exception {
+    createCollection("delete_data_dir", 1, 1, 1);
+    waitForRecoveriesToFinish("delete_data_dir", false);
+    cloudClient.setDefaultCollection("delete_data_dir");
+    cloudClient.getZkStateReader().updateClusterState(true);
+    NamedList<Object> response = cloudClient.query(
+        new SolrQuery().setRequestHandler("/admin/system")).getResponse();
+    NamedList<Object> coreInfo = (NamedList<Object>) response.get("core");
+    String dataDir = (String) ((NamedList<Object>) coreInfo.get("directory"))
+        .get("data");
+
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.DELETE.toString());
+    params.set("name", "delete_data_dir");
+    QueryRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+    cloudClient.request(request);
+    
+    Configuration conf = new Configuration();
+    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+    FileSystem fs = FileSystem.newInstance(new URI(dataDir), conf);
+    assertFalse(
+        "Data directory exists after collection removal : "
+            + dataDir, fs.exists(new Path(dataDir)));
+    fs.close();
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
new file mode 100644
index 0000000..2e343eb
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZk2Test.java
@@ -0,0 +1,58 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.BasicDistributedZk2Test;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsBasicDistributedZk2Test extends BasicDistributedZk2Test {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
new file mode 100644
index 0000000..8f7486d
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsBasicDistributedZkTest.java
@@ -0,0 +1,58 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.BasicDistributedZkTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsBasicDistributedZkTest extends BasicDistributedZkTest {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+  
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
new file mode 100644
index 0000000..7e49963
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsChaosMonkeySafeLeaderTest.java
@@ -0,0 +1,69 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.ChaosMonkeySafeLeaderTest;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsChaosMonkeySafeLeaderTest extends ChaosMonkeySafeLeaderTest {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+  
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    // the superclass may hard-code a directory factory, so force HDFS here
+    useFactory("org.apache.solr.core.HdfsDirectoryFactory");
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
new file mode 100644
index 0000000..226df65
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsCollectionsAPIDistributedZkTest.java
@@ -0,0 +1,64 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.CollectionsAPIDistributedZkTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsCollectionsAPIDistributedZkTest extends CollectionsAPIDistributedZkTest {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsCollectionsAPIDistributedZkTest.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+    
+    System.setProperty("solr.hdfs.home", dfsCluster.getURI().toString() + "/solr");
+    System.setProperty("solr.hdfs.blockcache.enabled", "false");
+    
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    System.clearProperty("solr.hdfs.home");
+    System.clearProperty("solr.hdfs.blockcache.enabled");
+    dfsCluster = null;
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
new file mode 100644
index 0000000..c9ee82f
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoveryZkTest.java
@@ -0,0 +1,59 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.RecoveryZkTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsRecoveryZkTest extends RecoveryZkTest {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+    System.setProperty("solr.hdfs.blockcache.blocksperbank", "2048");
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
new file mode 100644
index 0000000..f0a73bd
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsSyncSliceTest.java
@@ -0,0 +1,66 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.SyncSliceTest;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsSyncSliceTest extends SyncSliceTest {
+  private static MiniDFSCluster dfsCluster;
+  
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsBasicDistributedZk2Test.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+  
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
new file mode 100644
index 0000000..3da83ea
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
@@ -0,0 +1,93 @@
+package org.apache.solr.cloud.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Locale;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.SolrTestCaseJ4;
+import org.junit.Assert;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class HdfsTestUtil {
+  
+  private static Locale savedLocale;
+
+  public static MiniDFSCluster setupClass(String dataDir) throws Exception {
+    LuceneTestCase.assumeFalse("HDFS tests on Windows require Cygwin", Constants.WINDOWS);
+    LuceneTestCase.assumeFalse("HDFS do not work well with FreeBSD blackhole setup", Constants.FREE_BSD);
+   // LuceneTestCase.assumeFalse("HDFS tests on Windows require Cygwin", Constants.F);
+    File dir = new File(dataDir);
+    dir.mkdirs();
+
+    savedLocale = Locale.getDefault();
+    // TODO: we HACK around HADOOP-9643
+    Locale.setDefault(Locale.ENGLISH);
+    
+    int dataNodes = 2;
+    
+    Configuration conf = new Configuration();
+    conf.set("dfs.block.access.token.enable", "false");
+    conf.set("dfs.permissions.enabled", "false");
+    conf.set("hadoop.security.authentication", "simple");
+    conf.set("hdfs.minidfs.basedir", dir.getAbsolutePath() + File.separator + "hdfsBaseDir");
+    conf.set("dfs.namenode.name.dir", dir.getAbsolutePath() + File.separator + "nameNodeNameDir");
+    
+    System.setProperty("test.build.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build");
+    System.setProperty("test.cache.data", dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache");
+    System.setProperty("solr.lock.type", "hdfs");
+    
+    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
+    
+    SolrTestCaseJ4.useFactory("org.apache.solr.core.HdfsDirectoryFactory");
+    
+    return dfsCluster;
+  }
+  
+  public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
+    SolrTestCaseJ4.resetFactory();
+    System.clearProperty("solr.lock.type");
+    System.clearProperty("test.build.data");
+    System.clearProperty("test.cache.data");
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    
+    // TODO: we HACK around HADOOP-9643
+    if (savedLocale != null) {
+      Locale.setDefault(savedLocale);
+    }
+  }
+  
+  public static String getDataDir(MiniDFSCluster dfsCluster, String dataDir)
+      throws IOException {
+    URI uri = dfsCluster.getURI();
+    String dir = uri.toString()
+        + "/"
+        + new File(dataDir).toString().replaceAll(":", "_")
+            .replaceAll("/", "_");
+    return dir;
+  }
+
+}
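A condensed usage sketch of this helper, as the Hdfs* subclasses above apply it (paths illustrative): setupClass boots a two-datanode MiniDFSCluster and switches Solr to the HDFS directory factory, while getDataDir flattens a local path into a single path segment under the cluster's URI:

    MiniDFSCluster dfs = HdfsTestUtil.setupClass("/tmp/hdfs-test-home");
    String hdfsDataDir = HdfsTestUtil.getDataDir(dfs, "/tmp/solrtest/data");
    // e.g. hdfs://127.0.0.1:<port>/_tmp_solrtest_data
    HdfsTestUtil.teardownClass(dfs);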
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
new file mode 100644
index 0000000..bcb7580
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsUnloadDistributedZkTest.java
@@ -0,0 +1,61 @@
+package org.apache.solr.cloud.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.solr.cloud.UnloadDistributedZkTest;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import com.carrotsearch.randomizedtesting.annotations.Nightly;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@Slow
+@Nightly
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks thread(s)
+public class HdfsUnloadDistributedZkTest extends UnloadDistributedZkTest {
+  private static MiniDFSCluster dfsCluster;
+  
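+  // The test suite itself is inherited from UnloadDistributedZkTest; this
+  // subclass only redirects the index data into HDFS via getDataDir().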
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    assumeFalse("FIXME: This test fails under Java 8 all the time, see SOLR-4711", Constants.JRE_IS_MINIMUM_JAVA8);
+    
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        HdfsUnloadDistributedZkTest.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+  }
+  
+  @AfterClass
+  public static void teardownClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+
+  @Override
+  protected String getDataDir(String dataDir) throws IOException {
+    return HdfsTestUtil.getDataDir(dfsCluster, dataDir);
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/core/AbstractBadConfigTestBase.java b/solr/core/src/test/org/apache/solr/core/AbstractBadConfigTestBase.java
index 97f294e..1ec2ff3 100644
--- a/solr/core/src/test/org/apache/solr/core/AbstractBadConfigTestBase.java
+++ b/solr/core/src/test/org/apache/solr/core/AbstractBadConfigTestBase.java
@@ -18,9 +18,8 @@
 package org.apache.solr.core;
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 
+import java.util.Map;
 import java.util.regex.Pattern;
 
 public abstract class AbstractBadConfigTestBase extends SolrTestCaseJ4 {
@@ -50,26 +49,37 @@
 
     ignoreException(Pattern.quote(errString));
     try {
+
       if (null == solrHome) {
         initCore( solrconfigFile, schemaFile );
       } else {
         initCore( solrconfigFile, schemaFile, solrHome );
       }
-    } catch (Exception e) {
-      for (Throwable t = e; t != null; t = t.getCause()) {
-        // short circuit out if we found what we expected
-        if (t.getMessage() != null && -1 != t.getMessage().indexOf(errString)) return;
-      }
 
-      // otherwise, rethrow it, possibly completley unrelated
-      throw new SolrException
-        (ErrorCode.SERVER_ERROR, 
-         "Unexpected error, expected error matching: " + errString, e);
-    } finally {
+      CoreContainer cc = h.getCoreContainer();
+      for (Map.Entry<String, Exception> entry : cc.getCoreInitFailures().entrySet()) {
+        if (matches(entry.getValue(), errString))
+          return;
+      }
+    }
+    catch (Exception e) {
+      if (matches(e, errString))
+        return;
+      throw e;
+    }
+    finally {
       deleteCore();
       resetExceptionIgnores();
     }
     fail("Did not encounter any exception from: " + solrconfigFile + " using " + schemaFile);
   }
 
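+  /**
+   * Returns true if the given exception, or any exception in its cause
+   * chain, has a message containing the expected error string.
+   */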
+  private static boolean matches(Exception e, String errString) {
+    for (Throwable t = e; t != null; t = t.getCause()) {
+      if (t.getMessage() != null && -1 != t.getMessage().indexOf(errString))
+        return true;
+    }
+    return false;
+  }
+
 }
diff --git a/solr/core/src/test/org/apache/solr/core/CoreContainerCoreInitFailuresTest.java b/solr/core/src/test/org/apache/solr/core/CoreContainerCoreInitFailuresTest.java
index 1a5a55b..622cf4c 100644
--- a/solr/core/src/test/org/apache/solr/core/CoreContainerCoreInitFailuresTest.java
+++ b/solr/core/src/test/org/apache/solr/core/CoreContainerCoreInitFailuresTest.java
@@ -17,23 +17,17 @@
 
 package org.apache.solr.core;
 
-import java.util.Map;
-import java.util.Collection;
-import java.util.regex.Pattern;
-
-import java.io.File;
-
-import org.apache.solr.common.SolrException;
-import org.apache.solr.SolrTestCaseJ4;
-
-import org.apache.lucene.util.IOUtils;
-
 import org.apache.commons.io.FileUtils;
-
+import org.apache.lucene.util.IOUtils;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrException;
+import org.junit.After;
 import org.xml.sax.SAXParseException;
 
-import org.junit.Before;
-import org.junit.After;
+import java.io.File;
+import java.util.Collection;
+import java.util.Map;
+import java.util.regex.Pattern;
 
 public class CoreContainerCoreInitFailuresTest extends SolrTestCaseJ4 {
   
@@ -44,8 +38,7 @@
     // would be nice to do this in an @Before method,
     // but junit doesn't let @Before methods have test names
     solrHome = new File(TEMP_DIR, this.getClass().getName() + "_" + dirSuffix);
-    assertTrue("Failed to mkdirs solrhome", solrHome.mkdirs());
-    cc = new CoreContainer(solrHome.getAbsolutePath());
+    assertTrue("Failed to mkdirs solrhome [" + solrHome + "]", solrHome.mkdirs());
   }
 
   @After
@@ -68,7 +61,7 @@
     Map<String,Exception> failures = null;
     Collection<String> cores = null;
     Exception fail = null;
-    
+
     init("empty_flow");
 
     // solr.xml
@@ -77,7 +70,8 @@
 
     // ----
     // init the CoreContainer
-    cc.load(solrHome.getAbsolutePath(), solrXml);
+    cc = new CoreContainer(solrHome.getAbsolutePath());
+    cc.load();
 
     // check that we have the cores we expect
     cores = cc.getCoreNames();
@@ -150,22 +144,23 @@
     FileUtils.write(solrXml, BAD_SOLR_XML, IOUtils.CHARSET_UTF_8.toString());
 
     // our "ok" collection
-    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-basic.xml"),
+    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-defaults.xml"),
                        FileUtils.getFile(solrHome, "col_ok", "conf", "solrconfig.xml"));
     FileUtils.copyFile(getFile("solr/collection1/conf/schema-minimal.xml"),
                        FileUtils.getFile(solrHome, "col_ok", "conf", "schema.xml"));
-    
+
     // our "bad" collection
     ignoreException(Pattern.quote("DummyMergePolicy"));
     FileUtils.copyFile(getFile("solr/collection1/conf/bad-mp-solrconfig.xml"),
                        FileUtils.getFile(solrHome, "col_bad", "conf", "solrconfig.xml"));
     FileUtils.copyFile(getFile("solr/collection1/conf/schema-minimal.xml"),
                        FileUtils.getFile(solrHome, "col_bad", "conf", "schema.xml"));
-    
-    
+
+
     // -----
     // init the  CoreContainer with the mix of ok/bad cores
-    cc.load(solrHome.getAbsolutePath(), solrXml);
+    cc = new CoreContainer(solrHome.getAbsolutePath());
+    cc.load();
     
     // check that we have the cores we expect
     cores = cc.getCoreNames();
@@ -198,7 +193,7 @@
 
     // -----
     // "fix" the bad collection
-    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-basic.xml"),
+    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-defaults.xml"),
                        FileUtils.getFile(solrHome, "col_bad", "conf", "solrconfig.xml"));
     final CoreDescriptor fixed = new CoreDescriptor(cc, "col_bad", "col_bad");
     cc.register("col_bad", cc.create(fixed), false);
@@ -293,8 +288,8 @@
       fail("corrupt solrconfig.xml failed to trigger exception from reload");
     } catch (SolrException e) {
       assertTrue("We're supposed to have a wrapped SAXParserException here, but we don't",
-          e.getCause() instanceof SAXParseException);
-      SAXParseException se = (SAXParseException)e.getCause();
+          e.getCause().getCause() instanceof SAXParseException);
+      SAXParseException se = (SAXParseException)e.getCause().getCause();
      assertTrue("reload exception doesn't refer to solrconfig.xml " + se.getSystemId(),
           0 < se.getSystemId().indexOf("solrconfig.xml"));
 
@@ -318,13 +313,13 @@
     fail = failures.get("col_bad");
     assertNotNull("null failure for test core", fail);
     assertTrue("init failure isn't SAXParseException",
-               fail instanceof SAXParseException);
+               fail.getCause() instanceof SAXParseException);
     assertTrue("init failure doesn't mention problem: " + fail.toString(),
-               0 < ((SAXParseException)fail).getSystemId().indexOf("solrconfig.xml"));
+               0 < ((SAXParseException)fail.getCause()).getSystemId().indexOf("solrconfig.xml"));
 
     // ----
     // fix col_bad's config (again) and RELOAD to fix failure
-    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-basic.xml"),
+    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-defaults.xml"),
                        FileUtils.getFile(solrHome, "col_bad", "conf", "solrconfig.xml"));
     cc.reload("col_bad");
     
diff --git a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
index 4ff58d3..3db0f61 100644
--- a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
+++ b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
@@ -207,9 +207,10 @@
         }
       } while (secondsRemaining > 0);
 
-      assertTrue("We didn't index any documents, somethings really messsed up", cumulativeDocs > 0);
+      assertTrue("We didn't index any documents, something's really messed up", cumulativeDocs > 0);
     } catch (Exception e) {
       e.printStackTrace();
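+      // fail explicitly -- otherwise an unexpected exception would only be
+      // printed and the test would pass silently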
+      fail("Caught unexpected exception");
     }
   }
 
@@ -241,6 +242,8 @@
     FileUtils.copyFile(new File(testConf, "schema-tiny.xml"), new File(conf, "schema-tiny.xml"));
 
     FileUtils.copyFile(new File(testConf, "solrconfig-minimal.xml"), new File(conf, "solrconfig-minimal.xml"));
+    FileUtils.copyFile(new File(testConf, "solrconfig.snippet.randomindexconfig.xml"),
+        new File(conf, "solrconfig.snippet.randomindexconfig.xml"));
 
     if (!oldStyle) {
       FileUtils.copyFile(new File(testSrcRoot, "conf/core.properties"), new File(coreDir, "core.properties"));
@@ -479,7 +482,7 @@
     try {
       QueryResponse response = server.query(params);
       numFound = response.getResults().getNumFound();
-    } catch (SolrServerException e) {
+    } catch (Exception e) {
       e.printStackTrace();
     }
     return numFound;
diff --git a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
index 3702d25..8ad39d5 100644
--- a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
@@ -38,7 +38,7 @@
     // the hashcode should be the same even when the list
     // of filters is in a different order
     
-    Sort sort = new Sort(new SortField("test", SortField.Type.BYTE));
+    Sort sort = new Sort(new SortField("test", SortField.Type.INT));
     List<Query> filters = new ArrayList<Query>();
     filters.add(new TermQuery(new Term("test", "field")));
     filters.add(new TermQuery(new Term("test2", "field2")));
diff --git a/solr/core/src/test/org/apache/solr/core/SolrCoreCheckLockOnStartupTest.java b/solr/core/src/test/org/apache/solr/core/SolrCoreCheckLockOnStartupTest.java
index e69ce23..c1ec8f1 100644
--- a/solr/core/src/test/org/apache/solr/core/SolrCoreCheckLockOnStartupTest.java
+++ b/solr/core/src/test/org/apache/solr/core/SolrCoreCheckLockOnStartupTest.java
@@ -29,6 +29,7 @@
 import org.junit.Test;
 
 import java.io.File;
+import java.util.Map;
 
 public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {
 
@@ -62,13 +63,9 @@
     try {
       //opening a new core on the same index
       initCore("solrconfig-simplelock.xml", "schema.xml");
+      if (checkForCoreInitException(LockObtainFailedException.class))
+        return;
       fail("Expected " + LockObtainFailedException.class.getSimpleName());
-    } catch (Throwable t) {
-      assertTrue(t instanceof RuntimeException);
-      assertNotNull(t.getCause());
-      assertTrue(t.getCause() instanceof RuntimeException);
-      assertNotNull(t.getCause().getCause());
-      assertTrue(t.getCause().getCause().toString(), t.getCause().getCause() instanceof LockObtainFailedException);
     } finally {
       indexWriter.close();
       directory.close();
@@ -79,24 +76,33 @@
   @Test
   public void testNativeLockErrorOnStartup() throws Exception {
 
-    Directory directory = newFSDirectory(new File(dataDir, "index"), new NativeFSLockFactory());
+    File indexDir = new File(dataDir, "index");
+    log.info("Acquiring lock on {}", indexDir.getAbsolutePath());
+    Directory directory = newFSDirectory(indexDir, new NativeFSLockFactory());
     //creates a new IndexWriter without releasing the lock yet
     IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_40, null));
 
     try {
       //opening a new core on the same index
       initCore("solrconfig-nativelock.xml", "schema.xml");
+      if (checkForCoreInitException(LockObtainFailedException.class))
+        return;
       fail("Expected " + LockObtainFailedException.class.getSimpleName());
-    } catch(Throwable t) {
-      assertTrue(t instanceof RuntimeException);
-      assertNotNull(t.getCause());
-      assertTrue(t.getCause() instanceof RuntimeException);
-      assertNotNull(t.getCause().getCause());
-      assertTrue(t.getCause().getCause() instanceof  LockObtainFailedException);
     } finally {
       indexWriter.close();
       directory.close();
       deleteCore();
     }
   }
+
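+  // Core init failures are no longer thrown out of initCore(); they are
+  // collected in CoreContainer.getCoreInitFailures(), so scan each recorded
+  // failure's cause chain for the expected exception type.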
+  private boolean checkForCoreInitException(Class<? extends Exception> clazz) {
+    for (Map.Entry<String, Exception> entry : h.getCoreContainer().getCoreInitFailures().entrySet()) {
+      for (Throwable t = entry.getValue(); t != null; t = t.getCause()) {
+        if (clazz.isInstance(t))
+          return true;
+      }
+    }
+    return false;
+  }
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java b/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java
index 5820597..2f21333 100644
--- a/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java
+++ b/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java
@@ -50,6 +50,7 @@
   static String savedFactory;
   @BeforeClass
   public static void beforeClass() {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     savedFactory = System.getProperty("solr.DirectoryFactory");
     System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockFSDirectoryFactory");
   }
diff --git a/solr/core/src/test/org/apache/solr/core/TestConfig.java b/solr/core/src/test/org/apache/solr/core/TestConfig.java
index 6debff6..1e2398e 100644
--- a/solr/core/src/test/org/apache/solr/core/TestConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestConfig.java
@@ -138,11 +138,26 @@
   // If defaults change, add test methods to cover each version
   @Test
   public void testDefaults() throws Exception {
+
+    SolrConfig sc = new SolrConfig(new SolrResourceLoader("solr/collection1"), "solrconfig-defaults.xml", null);
+    SolrIndexConfig sic = sc.indexConfig;
+    assertEquals("default ramBufferSizeMB", 100.0D, sic.ramBufferSizeMB, 0.0D);
+    assertEquals("default LockType", SolrIndexConfig.LOCK_TYPE_NATIVE, sic.lockType);
+    assertEquals("default useCompoundFile", false, sic.useCompoundFile);
+
+  }
+
+
+  // sanity check that sys properties are working as expected
+  public void testSanityCheckTestSysPropsAreUsed() throws Exception {
+    final boolean expectCFS 
+      = Boolean.parseBoolean(System.getProperty("useCompoundFile"));
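+    // the "useCompoundFile" sysprop is presumably randomized per run by the
+    // test framework; this sanity check verifies that solrconfig-basic.xml
+    // actually picks it up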
+
     SolrConfig sc = new SolrConfig(new SolrResourceLoader("solr/collection1"), "solrconfig-basic.xml", null);
     SolrIndexConfig sic = sc.indexConfig;
-    assertTrue("default ramBufferSizeMB should be 100", sic.ramBufferSizeMB == 100);
-    assertTrue("default useCompoundFile should be false", sic.useCompoundFile == false);
-    assertTrue("default LockType should be native", sic.lockType.equals(SolrIndexConfig.LOCK_TYPE_NATIVE));
+    assertEquals("default ramBufferSizeMB", 100.0D, sic.ramBufferSizeMB, 0.0D);
+    assertEquals("default LockType", SolrIndexConfig.LOCK_TYPE_NATIVE, sic.lockType);
+    assertEquals("useCompoundFile sysprop", expectCFS, sic.useCompoundFile);
   }
 
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
index d56f2bf..259566e 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
@@ -17,6 +17,16 @@
 
 package org.apache.solr.core;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util._TestUtil;
+import org.apache.solr.SolrTestCaseJ4;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.ParserConfigurationException;
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileOutputStream;
@@ -27,17 +37,6 @@
 import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util._TestUtil;
-import org.apache.solr.SolrTestCaseJ4;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.xml.sax.SAXException;
-
 public class TestCoreContainer extends SolrTestCaseJ4 {
 
   private static String oldSolrHome;
@@ -70,9 +69,10 @@
     assertTrue("Failed to mkdirs workDir", solrHomeDirectory.mkdirs());
 
     FileUtils.copyDirectory(new File(SolrTestCaseJ4.TEST_HOME()), solrHomeDirectory);
+    System.out.println("Using solrconfig from " + new File(SolrTestCaseJ4.TEST_HOME()).getAbsolutePath());
 
     CoreContainer ret = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    ret.load(solrHomeDirectory.getAbsolutePath(), new File(solrHomeDirectory, "solr.xml"));
+    ret.load();
     return ret;
   }
 
@@ -161,13 +161,13 @@
       SolrCore template = null;
       try {
         template = cores.getCore("collection1");
-        instDir = template.getCoreDescriptor().getInstanceDir();
+        instDir = template.getCoreDescriptor().getRawInstanceDir();
       } finally {
         if (null != template) template.close();
       }
     }
     
-    final File instDirFile = new File(instDir);
+    final File instDirFile = new File(cores.getSolrHome(), instDir);
     assertTrue("instDir doesn't exist: " + instDir, instDirFile.exists());
     
     // sanity check the basic persistence of the default init
@@ -262,14 +262,8 @@
     File solrHomeDirectory = new File(TEMP_DIR, this.getClass().getName()
         + "_noCores");
     SetUpHome(solrHomeDirectory, EMPTY_SOLR_XML);
-    CoreContainer.Initializer init = new CoreContainer.Initializer();
-    CoreContainer cores = null;
-    try {
-      cores = init.initialize();
-    }
-    catch(Exception e) {
-      fail("CoreContainer not created" + e.getMessage());
-    }
+    CoreContainer cores = new CoreContainer(solrHomeDirectory.getAbsolutePath());
+    cores.load();
     try {
       //assert zero cores
       assertEquals("There should not be cores", 0, cores.getCores().size());
@@ -362,24 +356,21 @@
     FileUtils.writeStringToFile(new File(tmpRoot, "explicit-lib-solr.xml"), "<solr sharedLib=\"lib\"><cores/></solr>", "UTF-8");
     FileUtils.writeStringToFile(new File(tmpRoot, "custom-lib-solr.xml"), "<solr sharedLib=\"customLib\"><cores/></solr>", "UTF-8");
 
-    final CoreContainer cc1 = new CoreContainer(tmpRoot.getAbsolutePath());
-    cc1.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "default-lib-solr.xml"));
+    final CoreContainer cc1 = CoreContainer.createAndLoad(tmpRoot.getAbsolutePath(), new File(tmpRoot, "default-lib-solr.xml"));
     try {
       cc1.loader.openResource("defaultSharedLibFile").close();
     } finally {
       cc1.shutdown();
     }
 
-    final CoreContainer cc2 = new CoreContainer(tmpRoot.getAbsolutePath());
-    cc2.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "explicit-lib-solr.xml"));
+    final CoreContainer cc2 = CoreContainer.createAndLoad(tmpRoot.getAbsolutePath(), new File(tmpRoot, "explicit-lib-solr.xml"));
     try {
       cc2.loader.openResource("defaultSharedLibFile").close();
     } finally {
       cc2.shutdown();
     }
 
-    final CoreContainer cc3 = new CoreContainer(tmpRoot.getAbsolutePath());
-    cc3.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "custom-lib-solr.xml"));
+    final CoreContainer cc3 = CoreContainer.createAndLoad(tmpRoot.getAbsolutePath(), new File(tmpRoot, "custom-lib-solr.xml"));
     try {
       cc3.loader.openResource("customSharedLibFile").close();
     } finally {
@@ -392,22 +383,4 @@
       "  <cores adminPath=\"/admin/cores\" transientCacheSize=\"32\" >\n" +
       "  </cores>\n" +
       "</solr>";
-
-  private static final String SOLR_XML_SAME_NAME ="<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" +
-      "<solr persistent=\"false\">\n" +
-      "  <cores adminPath=\"/admin/cores\" transientCacheSize=\"32\" >\n" +
-      "    <core name=\"core1\" instanceDir=\"core1\" dataDir=\"core1\"/> \n" +
-      "    <core name=\"core1\" instanceDir=\"core2\" dataDir=\"core2\"/> \n " +
-      "  </cores>\n" +
-      "</solr>";
-
-  private static final String SOLR_XML_SAME_DATADIR ="<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" +
-      "<solr persistent=\"false\">\n" +
-      "  <cores adminPath=\"/admin/cores\" transientCacheSize=\"32\" >\n" +
-      "    <core name=\"core2\" instanceDir=\"core2\" dataDir=\"../samedatadir\" schema=\"schema-tiny.xml\" config=\"solrconfig-minimal.xml\" /> \n" +
-      "    <core name=\"core1\" instanceDir=\"core2\" dataDir=\"../samedatadir\" schema=\"schema-tiny.xml\" config=\"solrconfig-minimal.xml\"  /> \n " +
-      "  </cores>\n" +
-      "</solr>";
-
-
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
index dd4973a..c71a4d9 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
@@ -17,10 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.util.Properties;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.lucene.util.IOUtils;
 import org.apache.solr.SolrTestCaseJ4;
@@ -29,6 +25,10 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.Properties;
+
 public class TestCoreDiscovery extends SolrTestCaseJ4 {
 
   @BeforeClass
@@ -102,14 +102,12 @@
     assertTrue("Failed to mkdirs for " + confDir.getAbsolutePath(), confDir.mkdirs());
     FileUtils.copyFile(new File(top, "schema-tiny.xml"), new File(confDir, "schema-tiny.xml"));
     FileUtils.copyFile(new File(top, "solrconfig-minimal.xml"), new File(confDir, "solrconfig-minimal.xml"));
+    FileUtils.copyFile(new File(top, "solrconfig.snippet.randomindexconfig.xml"), new File(confDir, "solrconfig.snippet.randomindexconfig.xml"));
   }
 
   private CoreContainer init() throws Exception {
-
-    CoreContainer.Initializer init = new CoreContainer.Initializer();
-
-    final CoreContainer cores = init.initialize();
-    cores.setPersistent(false);
+    final CoreContainer cores = new CoreContainer();
+    cores.load();
     return cores;
   }
 
@@ -182,10 +180,14 @@
       cc = init();
       fail("Should have thrown exception in testDuplicateNames");
     } catch (SolrException se) {
+      Throwable cause = se.getCause();
+      String message = cause.getMessage();
       assertTrue("Should have seen an exception because two cores had the same name",
-          "Core  + desc.getName() + \" defined twice".indexOf(se.getMessage()) != -1);
-      assertTrue("/core1 should have been mentioned in the message", "/core1".indexOf(se.getMessage()) != -1);
-      assertTrue("/core2 should have been mentioned in the message", "/core2".indexOf(se.getMessage()) != -1);
+          message.indexOf("Core core1 defined more than once") != -1);
+      assertTrue(File.separator + "core1 should have been mentioned in the message: " + message,
+          message.indexOf(File.separator + "core1") != -1);
+      assertTrue(File.separator + "core2 should have been mentioned in the message:" + message,
+          message.indexOf(File.separator + "core2") != -1);
     } finally {
       if (cc != null) {
         cc.shutdown();
diff --git a/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java b/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java
new file mode 100644
index 0000000..fa6bc3f
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java
@@ -0,0 +1,38 @@
+package org.apache.solr.core;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.util.PrintStreamInfoStream;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.update.LoggingInfoStream;
+import org.junit.BeforeClass;
+
+public class TestInfoStreamLogging extends SolrTestCaseJ4 {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig-infostream-logging.xml","schema.xml");
+  }
+  
+  public void testIndexConfig() throws Exception {
+    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
+
+    assertTrue(iwc.getInfoStream() instanceof LoggingInfoStream);
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
index 7c17bf7..e4c2105 100644
--- a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
+++ b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
@@ -66,7 +66,7 @@
     File solrXml = new File(solrHomeDirectory, "solr.xml");
     FileUtils.write(solrXml, LOTS_SOLR_XML, IOUtils.CHARSET_UTF_8.toString());
     final CoreContainer cores = new CoreContainer(solrHomeDirectory.getAbsolutePath());
-    cores.load(solrHomeDirectory.getAbsolutePath(), solrXml);
+    cores.load();
     //  h.getCoreContainer().load(solrHomeDirectory.getAbsolutePath(), new File(solrHomeDirectory, "solr.xml"));
 
     cores.setPersistent(false);
@@ -285,8 +285,8 @@
           CoreAdminParams.CoreAdminAction.CREATE.toString(),
           CoreAdminParams.DATA_DIR, dataDir,
           CoreAdminParams.NAME, name,
-          "schema", "schema-tiny.xml",
-          "config", "solrconfig-minimal.xml");
+          "schema", "schema.xml",
+          "config", "solrconfig.xml");
 
       admin.handleRequestBody(request, resp);
       fail("Should have thrown an error");
@@ -347,29 +347,29 @@
       CoreDescriptor d1 = new CoreDescriptor(cc, "core1", "./core1");
       d1.setTransient(true);
       d1.setLoadOnStartup(true);
-      d1.setSchemaName("schema-tiny.xml");
-      d1.setConfigName("solrconfig-minimal.xml");
+      d1.setSchemaName("schema.xml");
+      d1.setConfigName("solrconfig.xml");
       SolrCore core1 = cc.create(d1);
 
       CoreDescriptor d2 = new CoreDescriptor(cc, "core2", "./core2");
       d2.setTransient(true);
       d2.setLoadOnStartup(false);
-      d2.setSchemaName("schema-tiny.xml");
-      d2.setConfigName("solrconfig-minimal.xml");
+      d2.setSchemaName("schema.xml");
+      d2.setConfigName("solrconfig.xml");
       SolrCore core2 = cc.create(d2);
 
       CoreDescriptor d3 = new CoreDescriptor(cc, "core3", "./core3");
       d3.setTransient(false);
       d3.setLoadOnStartup(true);
-      d3.setSchemaName("schema-tiny.xml");
-      d3.setConfigName("solrconfig-minimal.xml");
+      d3.setSchemaName("schema.xml");
+      d3.setConfigName("solrconfig.xml");
       SolrCore core3 = cc.create(d3);
 
       CoreDescriptor d4 = new CoreDescriptor(cc, "core4", "./core4");
       d4.setTransient(false);
       d4.setLoadOnStartup(false);
-      d4.setSchemaName("schema-tiny.xml");
-      d4.setConfigName("solrconfig-minimal.xml");
+      d4.setSchemaName("schema.xml");
+      d4.setConfigName("solrconfig.xml");
       SolrCore core4 = cc.create(d4);
 
       final File oneXml = new File(solrHomeDirectory, "lazy1.solr.xml");
@@ -455,31 +455,23 @@
 
   private final static String LOTS_SOLR_XML = " <solr persistent=\"false\"> " +
       "<cores adminPath=\"/admin/cores\" defaultCoreName=\"collectionLazy2\" transientCacheSize=\"4\">  " +
-      "<core name=\"collection1\" instanceDir=\"collection1\" config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\" /> " +
+      "<core name=\"collection1\" instanceDir=\"collection1\"  /> " +
 
-      "<core name=\"collectionLazy2\" instanceDir=\"collection2\" transient=\"true\" loadOnStartup=\"true\"  " +
-      " config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\" /> " +
+      "<core name=\"collectionLazy2\" instanceDir=\"collection2\" transient=\"true\" loadOnStartup=\"true\"   /> " +
 
-      "<core name=\"collectionLazy3\" instanceDir=\"collection3\" transient=\"on\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy3\" instanceDir=\"collection3\" transient=\"on\" loadOnStartup=\"false\"    /> " +
 
-      "<core name=\"collectionLazy4\" instanceDir=\"collection4\" transient=\"false\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy4\" instanceDir=\"collection4\" transient=\"false\" loadOnStartup=\"false\" /> " +
 
-      "<core name=\"collectionLazy5\" instanceDir=\"collection5\" transient=\"false\" loadOnStartup=\"true\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy5\" instanceDir=\"collection5\" transient=\"false\" loadOnStartup=\"true\" /> " +
 
-      "<core name=\"collectionLazy6\" instanceDir=\"collection6\" transient=\"true\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy6\" instanceDir=\"collection6\" transient=\"true\" loadOnStartup=\"false\" /> " +
 
-      "<core name=\"collectionLazy7\" instanceDir=\"collection7\" transient=\"true\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy7\" instanceDir=\"collection7\" transient=\"true\" loadOnStartup=\"false\" /> " +
 
-      "<core name=\"collectionLazy8\" instanceDir=\"collection8\" transient=\"true\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy8\" instanceDir=\"collection8\" transient=\"true\" loadOnStartup=\"false\" /> " +
 
-      "<core name=\"collectionLazy9\" instanceDir=\"collection9\" transient=\"true\" loadOnStartup=\"false\" " +
-      "config=\"solrconfig-minimal.xml\" schema=\"schema-tiny.xml\"  /> " +
+      "<core name=\"collectionLazy9\" instanceDir=\"collection9\" transient=\"true\" loadOnStartup=\"false\" /> " +
 
       "</cores> " +
       "</solr>";
diff --git a/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java b/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
index ba36777..b47eef5 100644
--- a/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
@@ -17,37 +17,173 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.util.RefCounted;
+import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.SolrTestCaseJ4;
-import org.junit.BeforeClass;
+import org.junit.After;
+import java.util.concurrent.atomic.AtomicInteger;
 
 public class TestMergePolicyConfig extends SolrTestCaseJ4 {
+  
+  private static AtomicInteger docIdCounter = new AtomicInteger(42);
 
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    initCore("solrconfig-mergepolicy.xml","schema.xml");
+  @After
+  public void after() throws Exception {
+    deleteCore();
+  }
+
+  public void testDefaultMergePolicyConfig() throws Exception {
+    initCore("solrconfig-mergepolicy-defaults.xml","schema-minimal.xml");
+    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
+    assertEquals(false, iwc.getUseCompoundFile());
+
+    TieredMergePolicy tieredMP = assertAndCast(TieredMergePolicy.class,
+                                               iwc.getMergePolicy());
+    assertEquals(0.0D, tieredMP.getNoCFSRatio(), 0.0D);
+
+    assertCommitSomeNewDocs();
+    assertCompoundSegments(h.getCore(), false);
+  }
+
+  public void testLegacyMergePolicyConfig() throws Exception {
+    final boolean expectCFS 
+      = Boolean.parseBoolean(System.getProperty("useCompoundFile"));
+
+    initCore("solrconfig-mergepolicy-legacy.xml","schema-minimal.xml");
+    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
+    assertEquals(expectCFS, iwc.getUseCompoundFile());
+
+    TieredMergePolicy tieredMP = assertAndCast(TieredMergePolicy.class,
+                                               iwc.getMergePolicy());
+
+    assertEquals(7, tieredMP.getMaxMergeAtOnce());
+    assertEquals(7.0D, tieredMP.getSegmentsPerTier(), 0.0D);
+    assertEquals(expectCFS ? 1.0D : 0.0D, tieredMP.getNoCFSRatio(), 0.0D);
+
+    assertCommitSomeNewDocs();
+    assertCompoundSegments(h.getCore(), expectCFS);
   }
   
   public void testTieredMergePolicyConfig() throws Exception {
-    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
-    MergePolicy mp = iwc.getMergePolicy();
-    assertTrue(mp instanceof TieredMergePolicy);
-    TieredMergePolicy tieredMP = (TieredMergePolicy) mp;
+    final boolean expectCFS 
+      = Boolean.parseBoolean(System.getProperty("useCompoundFile"));
 
-    // mp-specific setter
-    assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());
-    
-    // make sure we apply compoundFile and mergeFactor
-    assertEquals(false, tieredMP.getUseCompoundFile());
+    initCore("solrconfig-mergepolicy.xml","schema-minimal.xml");
+    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
+    assertEquals(expectCFS, iwc.getUseCompoundFile());
+
+    TieredMergePolicy tieredMP = assertAndCast(TieredMergePolicy.class,
+                                               iwc.getMergePolicy());
+
+    // set by legacy <mergeFactor> setting
     assertEquals(7, tieredMP.getMaxMergeAtOnce());
     
-    // make sure we overrode segmentsPerTier (split from maxMergeAtOnce out of mergeFactor)
+    // mp-specific setters
+    assertEquals(19, tieredMP.getMaxMergeAtOnceExplicit());
+    assertEquals(0.1D, tieredMP.getNoCFSRatio(), 0.0D);
+    // make sure we overrode segmentsPerTier 
+    // (split from maxMergeAtOnce out of mergeFactor)
     assertEquals(9D, tieredMP.getSegmentsPerTier(), 0.001);
     
-    // make sure we overrode noCFSRatio (useless because we disabled useCompoundFile,
-    // but just to make sure it works)
-    assertEquals(1.0D, tieredMP.getNoCFSRatio(), 0.001);
+    assertCommitSomeNewDocs();
+    // even though we have a single segment (which is 100% of the size of
+    // the index, higher than our 0.6D threshold) the compound ratio doesn't
+    // matter because the segment was never merged
+    assertCompoundSegments(h.getCore(), expectCFS);
+
+    assertCommitSomeNewDocs();
+    assertNumSegments(h.getCore(), 2);
+    assertCompoundSegments(h.getCore(), expectCFS);
+
+    assertU(optimize());
+    assertNumSegments(h.getCore(), 1);
+    // we've now forced a merge, and the MP ratio should be in play
+    assertCompoundSegments(h.getCore(), false);
   }
+
+  /**
+   * Given a Type and an object, asserts that the object is non-null and an
+   * instance of the specified Type.  The object is then cast to that type
+   * and returned.
+   */
+  public static <T> T assertAndCast(Class<? extends T> clazz, Object o) {
+    assertNotNull(clazz);
+    assertNotNull(o);
+    assertTrue(clazz.isInstance(o));
+    return clazz.cast(o);
+  }
+
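+  /**
+   * Adds a handful of uniquely numbered docs and commits.  The tests above
+   * rely on each call flushing (at least) one new segment.
+   */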
+  public static void assertCommitSomeNewDocs() {
+    for (int i = 0; i < 5; i++) {
+      int val = docIdCounter.getAndIncrement();
+      assertU(adoc("id", "" + val,
+                   "a_s", val + "_" + val + "_" + val + "_" + val,
+                   "b_s", val + "_" + val + "_" + val + "_" + val,
+                   "c_s", val + "_" + val + "_" + val + "_" + val,
+                   "d_s", val + "_" + val + "_" + val + "_" + val,
+                   "e_s", val + "_" + val + "_" + val + "_" + val,
+                   "f_s", val + "_" + val + "_" + val + "_" + val));
+    }
+    assertU(commit());
+  }
+
+  /**
+   * Given a SolrCore, asserts that the number of leaf segments in
+   * the index reader matches the expected value.
+   */
+  public static void assertNumSegments(SolrCore core, int expected) {
+    RefCounted<SolrIndexSearcher> searcherRef = core.getRegisteredSearcher();
+    try {
+      assertEquals(expected, searcherRef.get().getIndexReader().leaves().size());
+    } finally {
+      searcherRef.decref();
+    }
+  }
+
+  /**
+   * Given a SolrCore, asserts that each segment in the (searchable) index 
+   * has a compound file status that matches the expected input.
+   */
+  public static void assertCompoundSegments(SolrCore core, boolean compound) {
+    RefCounted<SolrIndexSearcher> searcherRef = core.getRegisteredSearcher();
+    try {
+      assertCompoundSegments(searcherRef.get().getIndexReader(), compound);
+    } finally {
+      searcherRef.decref();
+    }
+  }
+
+  /**
+   * Given an IndexReader, asserts that there is at least one AtomicReader
+   * leaf, and that all AtomicReader leaves are SegmentReaders with a compound
+   * file status that matches the expected input.
+   */
+  private static void assertCompoundSegments(IndexReader reader, 
+                                             boolean compound) {
+
+    assertNotNull("Null leaves", reader.leaves());
+    assertTrue("no leaves", 0 < reader.leaves().size());
+
+    for (AtomicReaderContext atomic : reader.leaves()) {
+      assertTrue("not a segment reader: " + atomic.reader().toString(), 
+                 atomic.reader() instanceof SegmentReader);
+      
+      assertEquals("Compound status incorrect for: " + 
+                   atomic.reader().toString(),
+                   compound,
+                   ((SegmentReader)atomic.reader()).getSegmentInfo().info.getUseCompoundFile());
+    }
+  }
+
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestReloadAndDeleteDocs.java b/solr/core/src/test/org/apache/solr/core/TestReloadAndDeleteDocs.java
new file mode 100644
index 0000000..d22c437
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/core/TestReloadAndDeleteDocs.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.core;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.junit.After;
+
+/** Inspired by SOLR-4858 */
+public class TestReloadAndDeleteDocs extends SolrTestCaseJ4 {
+  
+  @After
+  public void after() throws Exception {
+    System.clearProperty("enable.update.log");
+    deleteCore();
+  }
+
+  public void testReloadAndDeleteDocsNoUpdateLog() throws Exception {
+    doTest(false);
+  }
+
+  public void testReloadAndDeleteDocsWithUpdateLog() throws Exception {
+    doTest(true);
+  }
+
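+  // SOLR-4858 sequence: init a core, reload it, then delete-by-query --
+  // exercised both with and without the update log.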
+  private void doTest(final boolean useUpdateLog) throws Exception {
+    System.setProperty("enable.update.log", useUpdateLog ? "true" : "false");
+    initCore("solrconfig.xml", "schema.xml", TEST_HOME());
+    assertEquals("UpdateLog existence doesn't match sys prop (test config changed?)",
+                 useUpdateLog,
+                 null != h.getCore().getUpdateHandler().getUpdateLog());
+    h.reload();
+    assertU("<delete><query>*:*</query></delete>");
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/core/TestShardHandlerFactory.java b/solr/core/src/test/org/apache/solr/core/TestShardHandlerFactory.java
index d2e0332..0f5fbca 100644
--- a/solr/core/src/test/org/apache/solr/core/TestShardHandlerFactory.java
+++ b/solr/core/src/test/org/apache/solr/core/TestShardHandlerFactory.java
@@ -29,8 +29,7 @@
 public class TestShardHandlerFactory extends SolrTestCaseJ4 {
 
   public void testXML() throws Exception {
-    CoreContainer cc = new CoreContainer(TEST_HOME());
-    cc.load(TEST_HOME(), new File(TEST_HOME(), "solr-shardhandler.xml"));
+    CoreContainer cc = CoreContainer.createAndLoad(TEST_HOME(), new File(TEST_HOME(), "solr-shardhandler.xml"));
     ShardHandlerFactory factory = cc.getShardHandlerFactory();
     assertTrue(factory instanceof MockShardHandlerFactory);
     NamedList args = ((MockShardHandlerFactory)factory).args;
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrIndexConfig.java b/solr/core/src/test/org/apache/solr/core/TestSolrIndexConfig.java
index 9fa3af1..e32e610 100644
--- a/solr/core/src/test/org/apache/solr/core/TestSolrIndexConfig.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrIndexConfig.java
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.update.LoggingInfoStream;
 import org.junit.BeforeClass;
 
 public class TestSolrIndexConfig extends SolrTestCaseJ4 {
@@ -32,5 +33,6 @@
     IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
 
     assertEquals(123, iwc.getMaxThreadStates());
+    assertTrue(iwc.getInfoStream() instanceof LoggingInfoStream);
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
index 2aa88f0..2c4249e 100644
--- a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
@@ -143,6 +143,11 @@
     solrXMLDef.containerProperties = containerProperties ;
     solrXMLDef.solrAttribs = rootSolrAttribs;
     solrXMLDef.coresAttribs = coresAttribs;
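+    // the serializer expects these maps to be non-null, so hand it empty
+    // ones for the sections this test doesn't populate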
+    solrXMLDef.loggingAttribs = new HashMap<String, String>();
+    solrXMLDef.shardHandlerProps = new HashMap<String, String>();
+    solrXMLDef.shardHandlerAttribs = new HashMap<String, String>();
+    solrXMLDef.watcherAttribs = new HashMap<String, String>();
     return solrXMLDef;
   }
   
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXml.java b/solr/core/src/test/org/apache/solr/core/TestSolrXml.java
index 09cac4f..aa51aa8 100644
--- a/solr/core/src/test/org/apache/solr/core/TestSolrXml.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrXml.java
@@ -44,7 +44,7 @@
       boolean oldStyle = (config.getNode("solr/cores", false) != null);
       ConfigSolr cfg;
       if (oldStyle) {
-        cfg = new ConfigSolrXmlOld(config, cc);
+        cfg = new ConfigSolrXmlOld(config);
       } else {
         cfg = new ConfigSolrXml(config, cc);
       }
@@ -103,7 +103,7 @@
       boolean oldStyle = (config.getNode("solr/cores", false) != null);
       ConfigSolr cfg;
       if (oldStyle) {
-        cfg = new ConfigSolrXmlOld(config, cc);
+        cfg = new ConfigSolrXmlOld(config);
       } else {
         cfg = new ConfigSolrXml(config, cc);
       }
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java b/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java
new file mode 100644
index 0000000..1a7210a
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java
@@ -0,0 +1,638 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.core;
+
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.lucene.util.IOUtils;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.params.CoreAdminParams;
+import org.apache.solr.handler.admin.CoreAdminHandler;
+import org.apache.solr.response.SolrQueryResponse;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+import org.w3c.dom.Document;
+import org.w3c.dom.NamedNodeMap;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestSolrXmlPersistence extends SolrTestCaseJ4 {
+
+  private File solrHomeDirectory = new File(TEMP_DIR, this.getClass().getName());
+
+  @Rule
+  public TestRule solrTestRules =
+      RuleChain.outerRule(new SystemPropertiesRestoreRule());
+
+
+  private CoreContainer init(String solrXmlString, String... subDirs) throws Exception {
+
+    createTempDir();
+    solrHomeDirectory = dataDir;
+
+    for (String s : subDirs) {
+      copyMinConf(new File(solrHomeDirectory, s));
+    }
+
+    File solrXml = new File(solrHomeDirectory, "solr.xml");
+    FileUtils.write(solrXml, solrXmlString, IOUtils.CHARSET_UTF_8.toString());
+
+    final CoreContainer cores = createCoreContainer(solrHomeDirectory.getAbsolutePath(), solrXmlString);
+    return cores;
+  }
+
+
+  // Take a solr.xml with system vars in the <solr>, <cores>, <core> and <core/properties>
+  // tags. Ensure that after persisting solr.xml they're all still there in ${} syntax.
+  // Also ensure that nothing extra crept in.
+  @Test
+  public void testSystemVars() throws Exception {
+    // Set these system props to ensure that we don't write out the values rather than the ${} syntax.
+    System.setProperty("solr.zkclienttimeout", "93");
+    System.setProperty("solrconfig", "solrconfig.xml");
+    System.setProperty("schema", "schema.xml");
+    System.setProperty("zkHostSet", "localhost:9983");
+
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2");
+    try {
+
+      // This seems odd, but it's just a little self-check to see if the comparison strings are being created correctly
+      persistContainedInOrig(cc, new File(solrHomeDirectory, "solr_copy.xml"));
+
+      // Is everything in the persisted file contained in the original?
+      final File persistXml = new File(solrHomeDirectory, "sysvars.solr.xml");
+      // Side effect here is that the new file is persisted and available later.
+      persistContainedInOrig(cc, persistXml);
+
+      // Is everything in the original contained in the persisted one?
+      assertXmlFile(persistXml, getAllNodes(new File(solrHomeDirectory, "solr.xml")));
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testReload() throws Exception {
+    // Whether the core is transient or not can make a difference.
+    doReloadTest("SystemVars2");
+    doReloadTest("SystemVars1");
+  }
+
+  private void doReloadTest(String which) throws Exception {
+
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.RELOAD.toString(),
+              CoreAdminParams.CORE, which),
+              resp);
+      assertNull("Exception on reload", resp.getException());
+
+      persistContainedInOrig(cc, new File(solrHomeDirectory, "reload1.solr.xml"));
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testRename() throws Exception {
+    doTestRename("SystemVars1");
+    doTestRename("SystemVars2");
+  }
+
+  private void doTestRename(String which) throws Exception {
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.RENAME.toString(),
+              CoreAdminParams.CORE, which,
+              CoreAdminParams.OTHER, "RenamedCore"),
+              resp);
+      assertNull("Exception on rename", resp.getException());
+
+      File persistXml = new File(solrHomeDirectory, "rename.solr.xml");
+      File origXml = new File(solrHomeDirectory, "solr.xml");
+
+      // Ensure that if everything that was renamed is changed back to the
+      // original core name, it matches the old list
+      cc.persistFile(persistXml);
+      String[] persistList = getAllNodes(persistXml);
+      String[] expressions = new String[persistList.length];
+
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        expressions[idx] = persistList[idx].replaceAll("RenamedCore", which);
+      }
+
+      assertXmlFile(origXml, expressions);
+
+      // Now the other way: if I replace the original name in the original XML
+      // file with "RenamedCore", does it match what was persisted?
+      persistList = getAllNodes(origXml);
+      expressions = new String[persistList.length];
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        // /solr/cores/core[@name='SystemVars1' and @collection='${collection:collection1}']
+        expressions[idx] = persistList[idx].replace("@name='" + which + "'", "@name='RenamedCore'");
+      }
+
+      assertXmlFile(persistXml, expressions);
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testSwap() throws Exception {
+    doTestSwap("SystemVars1", "SystemVars2");
+    doTestSwap("SystemVars2", "SystemVars1");
+  }
+
+  private void doTestSwap(String from, String to) throws Exception {
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.SWAP.toString(),
+              CoreAdminParams.CORE, from,
+              CoreAdminParams.OTHER, to),
+              resp);
+      assertNull("Exception on swap", resp.getException());
+
+      File persistXml = new File(solrHomeDirectory, "rename.solr.xml");
+      File origXml = new File(solrHomeDirectory, "solr.xml");
+
+      cc.persistFile(persistXml);
+      String[] persistList = getAllNodes(persistXml);
+      String[] expressions = new String[persistList.length];
+
+      // Now manually change the names back and it should match the original XML exactly.
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        String fromName = "@name='" + from + "'";
+        String toName = "@name='" + to + "'";
+        if (persistList[idx].contains(fromName)) {
+          expressions[idx] = persistList[idx].replace(fromName, toName);
+        } else {
+          expressions[idx] = persistList[idx].replace(toName, fromName);
+        }
+      }
+
+      assertXmlFile(origXml, expressions);
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testMinimalXml() throws Exception {
+    CoreContainer cc = init(SOLR_XML_MINIMAL, "SystemVars1");
+    try {
+      persistContainedInOrig(cc, new File(solrHomeDirectory, "minimal.solr.xml"));
+      origContainedInPersist(cc, new File(solrHomeDirectory, "minimal.solr.xml"));
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testUnloadCreate() throws Exception {
+    doTestUnloadCreate("SystemVars1");
+    doTestUnloadCreate("SystemVars2");
+  }
+
+  private void doTestUnloadCreate(String which) throws Exception {
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.UNLOAD.toString(),
+              CoreAdminParams.CORE, which),
+              resp);
+      assertNull("Exception on unload", resp.getException());
+
+      persistContainedInOrig(cc, new File(solrHomeDirectory, "unloadcreate1.solr.xml"));
+
+      String instPath = new File(solrHomeDirectory, which).getAbsolutePath();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.CREATE.toString(),
+              CoreAdminParams.INSTANCE_DIR, instPath,
+              CoreAdminParams.NAME, which),
+              resp);
+      assertNull("Exception on create", resp.getException());
+
+      File persistXml = new File(solrHomeDirectory, "rename.solr.xml");
+      File origXml = new File(solrHomeDirectory, "solr.xml");
+
+      cc.persistFile(persistXml);
+      String[] persistList = getAllNodes(persistXml);
+      String[] expressions = new String[persistList.length];
+
+      // Now manually change the names back and it should match exactly to the original XML.
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        String name = "@name='" + which + "'";
+
+        if (persistList[idx].contains(name)) {
+          if (persistList[idx].contains("@schema='schema.xml'")) {
+            expressions[idx] = persistList[idx].replace("schema.xml", "${schema:schema.xml}");
+          } else if (persistList[idx].contains("@config='solrconfig.xml'")) {
+            expressions[idx] = persistList[idx].replace("solrconfig.xml", "${solrconfig:solrconfig.xml}");
+          } else if (persistList[idx].contains("@instanceDir=")) {
+            expressions[idx] = persistList[idx].replaceFirst("instanceDir\\='.*?'", "instanceDir='" + which + "/'");
+          } else {
+            expressions[idx] = persistList[idx];
+          }
+        } else {
+          expressions[idx] = persistList[idx];
+        }
+      }
+
+      assertXmlFile(origXml, expressions);
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  private void persistContainedInOrig(CoreContainer cc, File persistXml) throws IOException,
+      SAXException, ParserConfigurationException {
+    cc.persistFile(persistXml);
+    // Is everything that was persisted also present in the original file?
+    String[] expressions = getAllNodes(persistXml);
+    assertXmlFile(new File(solrHomeDirectory, "solr.xml"), expressions);
+  }
+
+  private void origContainedInPersist(CoreContainer cc, File persistXml) throws IOException,
+      SAXException, ParserConfigurationException {
+    cc.persistFile(persistXml);
+    // Is everything that's in the original file persisted?
+    String[] expressions = getAllNodes(new File(solrHomeDirectory, "solr.xml"));
+    assertXmlFile(persistXml, expressions);
+  }
+
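+  // Each helper alone is only a one-directional subset check; calling both, as
+  // testMinimalXml does, asserts that the persisted file and the original solr.xml
+  // contain exactly the same set of nodes.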
+
+  @Test
+  public void testCreateAndManipulateCores() throws Exception {
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2", "new_one", "new_two");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+      String instPathOne = new File(solrHomeDirectory, "new_one").getAbsolutePath();
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.CREATE.toString(),
+              CoreAdminParams.INSTANCE_DIR, instPathOne,
+              CoreAdminParams.NAME, "new_one"),
+              resp);
+      assertNull("Exception on create", resp.getException());
+
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.CREATE.toString(),
+              CoreAdminParams.NAME, "new_two"),
+              resp);
+      assertNull("Exception on create", resp.getException());
+
+      File persistXml1 = new File(solrHomeDirectory, "create_man_1.xml");
+      origContainedInPersist(cc, persistXml1);
+
+      // We know all the original data is in persist, now check for newly-created files.
+      String[] expressions = new String[2];
+      String instHome = new File(solrHomeDirectory, "new_one").getAbsolutePath();
+      expressions[0] = "/solr/cores/core[@name='new_one' and @instanceDir='" + instHome + "']";
+      expressions[1] = "/solr/cores/core[@name='new_two' and @instanceDir='new_two" + File.separator + "']";
+
+      assertXmlFile(persistXml1, expressions);
+
+      // Next, swap a created core and check
+      resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.SWAP.toString(),
+              CoreAdminParams.CORE, "new_one",
+              CoreAdminParams.OTHER, "SystemVars2"),
+              resp);
+      assertNull("Exception on swap", resp.getException());
+
+      File persistXml2 = new File(solrHomeDirectory, "create_man_2.xml");
+
+      cc.persistFile(persistXml2);
+      String[] persistList = getAllNodes(persistXml2);
+      expressions = new String[persistList.length];
+
+      // Now manually change the names back and it should match exactly to the original XML.
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        String fromName = "@name='new_one'";
+        String toName = "@name='SystemVars2'";
+        if (persistList[idx].contains(fromName)) {
+          expressions[idx] = persistList[idx].replace(fromName, toName);
+        } else {
+          expressions[idx] = persistList[idx].replace(toName, fromName);
+        }
+      }
+
+      assertXmlFile(persistXml1, expressions);
+
+      // Then rename the other created core and check
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.RENAME.toString(),
+              CoreAdminParams.CORE, "new_two",
+              CoreAdminParams.OTHER, "RenamedCore"),
+              resp);
+      assertNull("Exception on rename", resp.getException());
+
+      File persistXml3 = new File(solrHomeDirectory, "create_man_3.xml");
+
+      // Ensure that changing every renamed entry back to the original core name
+      // makes the list match the previously persisted one.
+      cc.persistFile(persistXml3);
+      persistList = getAllNodes(persistXml3);
+      expressions = new String[persistList.length];
+
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        expressions[idx] = persistList[idx].replaceAll("RenamedCore", "new_two");
+      }
+      assertXmlFile(persistXml2, expressions);
+
+      // Now the other way, If I replace the original name in the original XML file with "RenamedCore", does it match
+      // what was persisted?
+      persistList = getAllNodes(persistXml2);
+      expressions = new String[persistList.length];
+      for (int idx = 0; idx < persistList.length; ++idx) {
+        // /solr/cores/core[@name='SystemVars1' and @collection='${collection:collection1}']
+        expressions[idx] = persistList[idx].replace("@name='new_two'", "@name='RenamedCore'");
+      }
+      assertXmlFile(persistXml3, expressions);
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+    }
+  }
+
+  @Test
+  public void testCreatePersistCore() throws Exception {
+    // Template for creating a core.
+    CoreContainer cc = init(SOLR_XML_LOTS_SYSVARS, "SystemVars1", "SystemVars2", "props1", "props2");
+    try {
+      final CoreAdminHandler admin = new CoreAdminHandler(cc);
+      // create a new core (using CoreAdminHandler) w/ properties
+      String instPath1 = new File(solrHomeDirectory, "props1").getAbsolutePath();
+      SolrQueryResponse resp = new SolrQueryResponse();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.CREATE.toString(),
+              CoreAdminParams.NAME, "props1",
+              CoreAdminParams.TRANSIENT, "true",
+              CoreAdminParams.LOAD_ON_STARTUP, "true",
+              CoreAdminParams.PROPERTY_PREFIX + "prefix1", "valuep1",
+              CoreAdminParams.PROPERTY_PREFIX + "prefix2", "valueP2",
+              "wt", "json", // need to insure that extra parameters are _not_ preserved (actually happened).
+              "qt", "admin/cores"),
+              resp);
+      assertNull("Exception on create", resp.getException());
+
+      String instPath2 = new File(solrHomeDirectory, "props2").getAbsolutePath();
+      admin.handleRequestBody
+          (req(CoreAdminParams.ACTION,
+              CoreAdminParams.CoreAdminAction.CREATE.toString(),
+              CoreAdminParams.INSTANCE_DIR, instPath2,
+              CoreAdminParams.NAME, "props2",
+              CoreAdminParams.PROPERTY_PREFIX + "prefix2_1", "valuep2_1",
+              CoreAdminParams.PROPERTY_PREFIX + "prefix2_2", "valueP2_2",
+              CoreAdminParams.CONFIG, "solrconfig.xml",
+              CoreAdminParams.DATA_DIR, "./dataDirTest",
+              CoreAdminParams.SCHEMA, "schema.xml"),
+              resp);
+      assertNull("Exception on create", resp.getException());
+
+      // Everything that was in the original XML file should be in the persisted one.
+      final File persistXml = new File(solrHomeDirectory, "persist_create_core.solr.xml");
+      cc.persistFile(persistXml);
+      assertXmlFile(persistXml, getAllNodes(new File(solrHomeDirectory, "solr.xml")));
+
+      // And the params for the new core should be in the persisted file.
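+      // For "props1" that amounts to an element like (assuming '/' as File.separator):
+      //   <core name="props1" instanceDir="props1/" transient="true" loadOnStartup="true">
+      //     <property name="prefix1" value="valuep1"/>
+      //     <property name="prefix2" value="valueP2"/>
+      //   </core>
+      // with no trace of the wt/qt request parameters.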
+      assertXmlFile
+          (persistXml
+              , "/solr/cores/core[@name='props1']/property[@name='prefix1' and @value='valuep1']"
+              , "/solr/cores/core[@name='props1']/property[@name='prefix2' and @value='valueP2']"
+              , "/solr/cores/core[@name='props1' and @transient='true']"
+              , "/solr/cores/core[@name='props1' and @loadOnStartup='true']"
+              , "/solr/cores/core[@name='props1' and @instanceDir='props1" + File.separator + "']"
+              , "/solr/cores/core[@name='props2']/property[@name='prefix2_1' and @value='valuep2_1']"
+              , "/solr/cores/core[@name='props2']/property[@name='prefix2_2' and @value='valueP2_2']"
+              , "/solr/cores/core[@name='props2' and @config='solrconfig.xml']"
+              , "/solr/cores/core[@name='props2' and @schema='schema.xml']"
+              , "/solr/cores/core[@name='props2' and not(@loadOnStartup)]"
+              , "/solr/cores/core[@name='props2' and not(@transient)]"
+              , "/solr/cores/core[@name='props2' and @instanceDir='" + instPath2 + "']"
+              , "/solr/cores/core[@name='props2' and @dataDir='./dataDirTest']"
+          );
+
+    } finally {
+      cc.shutdown();
+      if (solrHomeDirectory.exists()) {
+        FileUtils.deleteDirectory(solrHomeDirectory);
+      }
+
+    }
+  }
+
+  private String[] getAllNodes(File xmlFile) throws ParserConfigurationException, IOException, SAXException {
+    List<String> expressions = new ArrayList<String>(); // XPATH and value for all elements in the indicated XML
+    DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
+        .newInstance();
+    DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
+    Document document = docBuilder.parse(xmlFile);
+
+    Node root = document.getDocumentElement();
+    gatherNodes(root, expressions, "");
+    return expressions.toArray(new String[expressions.size()]);
+  }
+
+
+  // Note this is pretty specialized for a solr.xml file because working with the DOM is such a pain.
+
+  private static List<String> qualified = new ArrayList<String>() {{
+    add("core");
+    add("property");
+    add("int");
+    add("str");
+    add("long");
+    add("property");
+  }};
+
+  private static List<String> addText = new ArrayList<String>() {{
+    add("int");
+    add("str");
+    add("long");
+  }};
+
+  // 'path' is the XPath of the parent node.
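+  //
+  // For example, given <solr><cores adminPath="/admin/cores"><core name="c1"
+  // instanceDir="c1/"/></cores></solr>, the collected expressions would be:
+  //   /solr/cores[@adminPath='/admin/cores']
+  //   /solr/cores/core[@name='c1' and @instanceDir='c1/']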
+
+  private void gatherNodes(Node node, List<String> expressions, String path) {
+
+    String nodeName = node.getNodeName();
+    String thisPath = path + "/" + nodeName;
+    // Parent[@id='1']/Children/child[@name]
+    // Add in the xpaths for verification of any attributes.
+    NamedNodeMap attrs = node.getAttributes();
+    String qualifier = "";
+    if (attrs.getLength() > 0) {
+      // Assemble the prefix for qualifying all of the attributes with the same name
+      if (qualified.contains(nodeName)) {
+        qualifier = "@name='" + node.getAttributes().getNamedItem("name").getTextContent() + "'";
+      }
+
+      for (int idx = 0; idx < attrs.getLength(); ++idx) {
+
+        Node attr = attrs.item(idx);
+        if (StringUtils.isNotBlank(qualifier) && "name".equals(attr.getNodeName())) {
+          continue; // Already added "name" attribute in qualifier string.
+        }
+        if (StringUtils.isNotBlank(qualifier)) {
+          // Create [@name="stuff" and @attrib="value"] fragment
+          expressions.add(thisPath +
+              "[" + qualifier + " and @" + attr.getNodeName() + "='" + attr.getTextContent() + "']");
+
+        } else {
+          // Create [@attrib="value"] fragment
+          expressions.add(thisPath +
+              "[@" + attr.getNodeName() + "='" + attr.getTextContent() + "']");
+        }
+      }
+    }
+    // Now add the text for special nodes
+    // a[normalize-space(text())='somesite']
+    if (addText.contains(nodeName)) {
+      expressions.add(thisPath + "[" + qualifier + " and text()='" + node.getTextContent() + "']");
+    }
+    // Now collect all the child element nodes.
+    NodeList nodeList = node.getChildNodes();
+    for (int i = 0; i < nodeList.getLength(); i++) {
+
+      Node currentNode = nodeList.item(i);
+      if (currentNode.getNodeType() == Node.ELEMENT_NODE) {
+        if (StringUtils.isNotBlank(qualifier)) {
+          gatherNodes(currentNode, expressions, thisPath + "[" + qualifier + "]");
+        } else {
+          gatherNodes(currentNode, expressions, thisPath);
+        }
+      }
+    }
+  }
+
+  private static String SOLR_XML_LOTS_SYSVARS =
+      "<solr persistent=\"${solr.xml.persist:false}\" coreLoadThreads=\"12\" sharedLib=\"${something:.}\" >\n" +
+          "  <logging class=\"${logclass:log4j.class}\" enabled=\"{logenable:true}\">\n" +
+          "     <watcher size=\"{watchSize:13}\" threshold=\"${logThresh:54}\" />\n" +
+          "  </logging>\n" +
+          "  <shardHandlerFactory name=\"${shhandler:shardHandlerFactory}\" class=\"${handlefac:HttpShardHandlerFactory}\">\n" +
+          "     <int name=\"socketTimeout\">${socketTimeout:120000}</int> \n" +
+          "     <int name=\"connTimeout\">${connTimeout:15000}</int> \n" +
+          "  </shardHandlerFactory> \n" +
+          "  <cores adminPath=\"/admin/cores\" defaultCoreName=\"SystemVars1\" host=\"127.0.0.1\" \n" +
+          "       hostPort=\"${hostPort:8983}\" hostContext=\"${hostContext:solr}\" \n" +
+          "       zkClientTimeout=\"${solr.zkclienttimeout:30000}\" \n" +
+          "       shareSchema=\"${shareSchema:false}\" distribUpdateConnTimeout=\"${distribUpdateConnTimeout:15000}\" \n" +
+          "       distribUpdateSoTimeout=\"${distribUpdateSoTimeout:120000}\" \n" +
+          "       leaderVoteWait=\"${leadVoteWait:32}\" managementPath=\"${manpath:/var/lib/path}\" transientCacheSize=\"${tranSize:128}\"> \n" +
+          "     <core name=\"SystemVars1\" instanceDir=\"SystemVars1/\" shard=\"${shard:32}\" \n" +
+          "          collection=\"${collection:collection1}\" config=\"${solrconfig:solrconfig.xml}\" \n" +
+          "          schema=\"${schema:schema.xml}\" ulogDir=\"${ulog:./}\" roles=\"${myrole:boss}\" \n" +
+          "          dataDir=\"${data:./}\" loadOnStartup=\"${onStart:true}\" transient=\"${tran:true}\" \n" +
+          "          coreNodeName=\"${coreNode:utterlyridiculous}\" \n" +
+          "       >\n" +
+          "     </core>\n" +
+          "     <core name=\"SystemVars2\" instanceDir=\"SystemVars2/\" shard=\"${shard:32}\" \n" +
+          "          collection=\"${collection:collection2}\" config=\"${solrconfig:solrconfig.xml}\" \n" +
+          "          coreNodeName=\"${coreNodeName:}\" schema=\"${schema:schema.xml}\">\n" +
+          "      <property name=\"collection\" value=\"{collection:collection2}\"/>\n" +
+          "      <property name=\"schema\" value=\"${schema:schema.xml}\"/>\n" +
+          "      <property name=\"coreNodeName\" value=\"EricksCore\"/>\n" +
+          "     </core>\n" +
+          "   </cores>\n" +
+          "</solr>";
+
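+  // The many ${sysvar:default} attributes above exist because persistence must write
+  // the un-substituted property expressions back out, not their resolved values
+  // (e.g. hostPort should persist as "${hostPort:8983}", never as "8983").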
+
+  private static String SOLR_XML_MINIMAL =
+          "<solr >\n" +
+          "  <cores> \n" +
+          "     <core name=\"SystemVars1\" instanceDir=\"SystemVars1/\" />\n" +
+          "   </cores>\n" +
+          "</solr>";
+
+}
diff --git a/solr/core/src/test/org/apache/solr/handler/JsonLoaderTest.java b/solr/core/src/test/org/apache/solr/handler/JsonLoaderTest.java
index 916cf69..9dd5c71 100644
--- a/solr/core/src/test/org/apache/solr/handler/JsonLoaderTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/JsonLoaderTest.java
@@ -18,6 +18,7 @@
 package org.apache.solr.handler;
 
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.SolrInputField;
 import org.apache.solr.common.util.ContentStreamBase;
@@ -30,7 +31,11 @@
 import org.apache.solr.update.processor.BufferingRequestProcessor;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.xml.sax.SAXException;
 
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.List;
 import java.util.Map;
 
 public class JsonLoaderTest extends SolrTestCaseJ4 {
@@ -236,6 +241,198 @@
         ,"/response/docs/[0]=={'foo2_s':['hi','there']}"
     );
   }
+  
+  @Test
+  public void testBooleanValuesInAdd() throws Exception {
+    String str = "{'add':[{'id':'1','b1':true,'b2':false,'b3':[false,true]}]}".replace('\'', '"');
+    SolrQueryRequest req = req();
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
+    JsonLoader loader = new JsonLoader();
+    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
+
+    assertEquals(1, p.addCommands.size());
+
+    AddUpdateCommand add = p.addCommands.get(0);
+    SolrInputDocument d = add.solrDoc;
+    SolrInputField f = d.getField("b1");
+    assertEquals(Boolean.TRUE, f.getValue());
+    f = d.getField("b2");
+    assertEquals(Boolean.FALSE, f.getValue());
+    f = d.getField("b3");
+    assertEquals(2, ((List)f.getValue()).size());
+    assertEquals(Boolean.FALSE, ((List)f.getValue()).get(0));
+    assertEquals(Boolean.TRUE, ((List)f.getValue()).get(1));
+
+    req.close();
+  }
+
+  @Test
+  public void testIntegerValuesInAdd() throws Exception {
+    String str = "{'add':[{'id':'1','i1':256,'i2':-5123456789,'i3':[0,1]}]}".replace('\'', '"');
+    SolrQueryRequest req = req();
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
+    JsonLoader loader = new JsonLoader();
+    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
+
+    assertEquals(1, p.addCommands.size());
+
+    AddUpdateCommand add = p.addCommands.get(0);
+    SolrInputDocument d = add.solrDoc;
+    SolrInputField f = d.getField("i1");
+    assertEquals(256L, f.getValue());
+    f = d.getField("i2");
+    assertEquals(-5123456789L, f.getValue());
+    f = d.getField("i3");
+    assertEquals(2, ((List)f.getValue()).size());
+    assertEquals(0L, ((List)f.getValue()).get(0));
+    assertEquals(1L, ((List)f.getValue()).get(1));
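+    // (JsonLoader parses every JSON whole number that fits in 64 bits as a Long,
+    // which is why even the small literals 0 and 1 come back as 0L and 1L here.)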
+
+    req.close();
+  }
+
+
+  @Test
+  public void testDecimalValuesInAdd() throws Exception {
+    String str = "{'add':[{'id':'1','d1':256.78,'d2':-5123456789.0,'d3':0.0,'d3':1.0,'d4':1.7E-10}]}".replace('\'', '"');
+    SolrQueryRequest req = req();
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
+    JsonLoader loader = new JsonLoader();
+    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
+
+    assertEquals(1, p.addCommands.size());
+
+    AddUpdateCommand add = p.addCommands.get(0);
+    SolrInputDocument d = add.solrDoc;
+    SolrInputField f = d.getField("d1");
+    assertEquals(256.78, f.getValue());
+    f = d.getField("d2");
+    assertEquals(-5123456789.0, f.getValue());
+    f = d.getField("d3");
+    assertEquals(2, ((List)f.getValue()).size());
+    assertTrue(((List)f.getValue()).contains(0.0));
+    assertTrue(((List) f.getValue()).contains(1.0));
+    f = d.getField("d4");
+    assertEquals(1.7E-10, f.getValue());
+
+    req.close();
+  }
+
+  @Test
+  public void testBigDecimalValuesInAdd() throws Exception {
+    String str = ("{'add':[{'id':'1','bd1':0.12345678901234567890123456789012345,"
+                 + "'bd2':12345678901234567890.12345678901234567890,'bd3':0.012345678901234567890123456789012345,"
+                 + "'bd3':123456789012345678900.012345678901234567890}]}").replace('\'', '"');
+    SolrQueryRequest req = req();
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
+    JsonLoader loader = new JsonLoader();
+    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
+
+    assertEquals(1, p.addCommands.size());
+
+    AddUpdateCommand add = p.addCommands.get(0);
+    SolrInputDocument d = add.solrDoc;
+    SolrInputField f = d.getField("bd1");                        
+    assertTrue(f.getValue() instanceof String);
+    assertEquals("0.12345678901234567890123456789012345", f.getValue());
+    f = d.getField("bd2");
+    assertTrue(f.getValue() instanceof String);
+    assertEquals("12345678901234567890.12345678901234567890", f.getValue());
+    f = d.getField("bd3");
+    assertEquals(2, ((List)f.getValue()).size());
+    assertTrue(((List)f.getValue()).contains("0.012345678901234567890123456789012345"));
+    assertTrue(((List)f.getValue()).contains("123456789012345678900.012345678901234567890"));
+
+    req.close();
+  }
+
+  @Test
+  public void testBigIntegerValuesInAdd() throws Exception {
+    String str = ("{'add':[{'id':'1','bi1':123456789012345678901,'bi2':1098765432109876543210,"
+                 + "'bi3':[1234567890123456789012,10987654321098765432109]}]}").replace('\'', '"');
+    SolrQueryRequest req = req();
+    SolrQueryResponse rsp = new SolrQueryResponse();
+    BufferingRequestProcessor p = new BufferingRequestProcessor(null);
+    JsonLoader loader = new JsonLoader();
+    loader.load(req, rsp, new ContentStreamBase.StringStream(str), p);
+
+    assertEquals(1, p.addCommands.size());
+
+    AddUpdateCommand add = p.addCommands.get(0);
+    SolrInputDocument d = add.solrDoc;
+    SolrInputField f = d.getField("bi1");
+    assertTrue(f.getValue() instanceof String);
+    assertEquals("123456789012345678901", f.getValue());
+    f = d.getField("bi2");
+    assertTrue(f.getValue() instanceof String);
+    assertEquals("1098765432109876543210", f.getValue());
+    f = d.getField("bi3");
+    assertEquals(2, ((List)f.getValue()).size());
+    assertTrue(((List)f.getValue()).contains("1234567890123456789012"));
+    assertTrue(((List)f.getValue()).contains("10987654321098765432109"));
+
+    req.close();
+  }
+
+
+  @Test
+  public void testAddNonStringValues() throws Exception {
+    // BigInteger and BigDecimal should be typed as strings, since there is no direct support for them
+    updateJ(("[{'id':'1','boolean_b':false,'long_l':19,'double_d':18.6,'big_integer_s':12345678901234567890,"
+        +"      'big_decimal_s':0.1234567890123456789012345}]").replace('\'', '"'), params("commit","true"));
+    assertJQ(req("q","id:1", "fl","boolean_b,long_l,double_d,big_integer_s,big_decimal_s")
+        ,"/response/docs/[0]=={'boolean_b':[false],'long_l':[19],'double_d':[18.6],"
+                             +"'big_integer_s':['12345678901234567890'],"
+                             +"'big_decimal_s':['0.1234567890123456789012345']}"
+    );
+  }
+
+
+  @Test
+  public void testAddBigIntegerValueToTrieField() throws Exception {
+    // Adding a BigInteger to a long field should fail
+    // BigInteger.longValue() returns only the low-order 64 bits.
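+    //   e.g. new BigInteger("12345678901234567890").longValue() yields
+    //   -6101065172474983726L, silently corrupting the value.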
+    try {
+      updateJ(("[{'id':'1','big_integer_tl':12345678901234567890}]").replace('\'', '"'), null);
+      fail("A BigInteger value should overflow a long field");
+    } catch (SolrException e) {
+      if ( ! (e.getCause() instanceof NumberFormatException)) {
+        throw e;
+      }
+    }
+
+    // Adding a BigInteger to an integer field should fail
+    // BigInteger.intValue() returns only the low-order 32 bits.
+    try {
+      updateJ(("[{'id':'1','big_integer_ti':12345678901234567890}]").replace('\'', '"'), null);
+      fail("A BigInteger value should overflow an integer field");
+    } catch (SolrException e) {
+      if ( ! (e.getCause() instanceof NumberFormatException)) {
+        throw e;
+      }
+    }
+
+  }
+
+  @Test
+  public void testAddBigDecimalValueToTrieField() throws Exception {
+    // Adding a BigDecimal to a double field should succeed by reducing precision
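+    // (a double carries only ~16 significant decimal digits, so the huge literal
+    // below rounds to 1.0E38, which is exactly what the assertion expects)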
+    updateJ(("[{'id':'1','big_decimal_td':100000000000000000000000000001234567890.0987654321}]").replace('\'', '"'),
+            params("commit", "true"));
+    assertJQ(req("q","id:1", "fl","big_decimal_td"), 
+             "/response/docs/[0]=={'big_decimal_td':[1.0E38]}"
+    );
+
+    // Adding a BigDecimal to a float field should succeed by reducing precision
+    updateJ(("[{'id':'2','big_decimal_tf':100000000000000000000000000001234567890.0987654321}]").replace('\'', '"'),
+            params("commit", "true"));
+    assertJQ(req("q","id:2", "fl","big_decimal_tf"),
+             "/response/docs/[0]=={'big_decimal_tf':[1.0E38]}"
+    );
+  }
 
   // The delete syntax was both extended for simplification in 4.0
   @Test
diff --git a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
index a2deb3b..ed23a08 100755
--- a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
@@ -35,6 +35,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index 8c591c9..388bbc8 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -103,7 +103,7 @@
     super.setUp();
 //    System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
     // For manual testing only
-    // useFactory(null); // force an FS factory
+    // useFactory(null); // force an FS factory.
     master = new SolrInstance("master", null);
     master.setUp();
     masterJetty = createJetty(master);
@@ -339,6 +339,40 @@
     }
   }
 
+
+  /**
+   * Verify that things still work if an IW has not been opened (and hence the CommitPoints have not been communicated to the deletion policy)
+   */
+  public void testNoWriter() throws Exception {
+    useFactory(null);    // force a persistent directory
+
+    // stop and start so they see the new directory setting
+    slaveJetty.stop();
+    masterJetty.stop();
+    slaveJetty.start(true);
+    masterJetty.start(true);
+
+    index(slaveClient, "id", "123456");
+    slaveClient.commit();
+    slaveJetty.stop();
+    slaveJetty.start(true);
+
+    // Currently we open a writer on-demand.  This exercises the code path where
+    // SolrDeletionPolicy.getLatestCommit() returns null.
+    // With an ephemeral directory an IW would always be opened to create the index,
+    // so getLatestCommit() would always be non-null; hence the persistent directory forced above.
+    CoreContainer cores = ((SolrDispatchFilter) slaveJetty.getDispatchFilter().getFilter()).getCores();
+    Collection<SolrCore> theCores = cores.getCores();
+    assertEquals(1, theCores.size());
+    SolrCore core = (SolrCore)theCores.toArray()[0];
+    assertNull( core.getDeletionPolicy().getLatestCommit() );
+
+
+    pullFromMasterToSlave();  // invokes SnapPuller, exercising the case where SolrDeletionPolicy.getLatestCommit() returns null
+
+    resetFactory();
+  }
+
   /**
    * Verify that empty commits and/or commits with openSearcher=false 
    * on the master do not cause subsequent replication problems on the slave 
@@ -1507,7 +1541,7 @@
     }
 
     public String getDataDir() {
-      return dataDir.toString();
+      return dataDir.getAbsolutePath();
     }
 
     public String getSolrConfigFile() {
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/LoggingHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/LoggingHandlerTest.java
index a81e0f1..5eaf9ef 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/LoggingHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/LoggingHandlerTest.java
@@ -17,15 +17,22 @@
 
 package org.apache.solr.handler.admin;
 
-import java.util.logging.Logger;
-
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.logging.jul.JulInfo;
+import org.apache.solr.logging.log4j.Log4jInfo;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+
 public class LoggingHandlerTest extends SolrTestCaseJ4 {
+
+  // TODO: This only tests Log4j at the moment, as that's what's defined
+  // through the CoreContainer.
+
+  // TODO: Would be nice to throw an exception on trying to set a
+  // log level that doesn't exist
   
   @BeforeClass
   public static void beforeClass() throws Exception {
@@ -35,7 +42,8 @@
   @Test
   public void testLogLevelHandlerOutput() throws Exception {
     Logger tst = Logger.getLogger("org.apache.solr.SolrTestCaseJ4");
-    JulInfo wrap = new JulInfo(tst.getName(), tst);
+    tst.setLevel(Level.INFO);
+    Log4jInfo wrap = new Log4jInfo(tst.getName(), tst);
     
     assertQ("Show Log Levels OK",
             req(CommonParams.QT,"/admin/logging")
@@ -46,8 +54,8 @@
     assertQ("Set and remove a level",
             req(CommonParams.QT,"/admin/logging",  
                 "set", "org.xxx.yyy.abc:null",
-                "set", "org.xxx.yyy.zzz:FINEST")
-            ,"//arr[@name='loggers']/lst/str[.='org.xxx.yyy.zzz']/../str[@name='level'][.='FINEST']"
+                "set", "org.xxx.yyy.zzz:TRACE")
+            ,"//arr[@name='loggers']/lst/str[.='org.xxx.yyy.zzz']/../str[@name='level'][.='TRACE']"
             );
   }
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
index 34e9d77..dcf73f6 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/LukeRequestHandlerTest.java
@@ -17,17 +17,18 @@
 
 package org.apache.solr.handler.admin;
 
-import java.util.Arrays;
-import java.util.EnumSet;
-
 import org.apache.solr.common.luke.FieldFlag;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.util.AbstractSolrTestCase;
+import org.apache.solr.util.TestHarness;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.util.Arrays;
+import java.util.EnumSet;
+
 /**
  * :TODO: currently only tests some of the utilities in the LukeRequestHandler
  */
@@ -35,6 +36,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
@@ -155,7 +157,7 @@
     try {
       // First, determine that the two fields ARE there
       String response = h.query(req);
-      assertNull(h.validateXPath(response,
+      assertNull(TestHarness.validateXPath(response,
           getFieldXPathPrefix("solr_t") + "[@name='index']",
           getFieldXPathPrefix("solr_s") + "[@name='index']"
       ));
@@ -164,7 +166,7 @@
       for (String f : Arrays.asList("solr_ti",
           "solr_td", "solr_pl", "solr_dt", "solr_b")) {
 
-        assertNotNull(h.validateXPath(response,
+        assertNotNull(TestHarness.validateXPath(response,
             getFieldXPathPrefix(f) + "[@name='index']"));
 
       }
@@ -174,7 +176,7 @@
       for (String f : Arrays.asList("solr_t", "solr_s", "solr_ti",
           "solr_td", "solr_pl", "solr_dt", "solr_b")) {
 
-        assertNull(h.validateXPath(response,
+        assertNull(TestHarness.validateXPath(response,
             getFieldXPathPrefix(f) + "[@name='index']"));
       }
     } catch (Exception e) {
@@ -186,7 +188,7 @@
     SolrQueryRequest req = req("qt", "/admin/luke", "show", "schema");
 
     String xml = h.query(req);
-    String r = h.validateXPath
+    String r = TestHarness.validateXPath
       (xml,
        field("text") + "/arr[@name='copySources']/str[.='title']",
        field("text") + "/arr[@name='copySources']/str[.='subject']",
@@ -216,7 +218,7 @@
 
     SolrQueryRequest req = req("qt", "/admin/luke", "show", "schema", "indent", "on");
     String xml = h.query(req);
-    String result = h.validateXPath(xml, field("bday") + "/arr[@name='copyDests']/str[.='catchall_t']");
+    String result = TestHarness.validateXPath(xml, field("bday") + "/arr[@name='copyDests']/str[.='catchall_t']");
     assertNull(xml, result);
 
     // Put back the configuration expected by the rest of the tests in this suite
diff --git a/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java
index ae94d3c..17361e0 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/BadComponentTest.java
@@ -19,7 +19,6 @@
  */
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrException;
 import org.junit.Test;
 
 /**
@@ -34,14 +33,7 @@
       ignoreException(".*QueryElevationComponent.*");
       System.setProperty("elevate.file", "foo.xml");
       initCore("solrconfig-elevate.xml", "schema12.xml");
-      assertTrue(false);
-    } catch (RuntimeException e) {
-      //TODO: better way of checking this?
-      if (e.getCause() instanceof SolrException){
-        assertTrue(true);
-      } else {
-        assertTrue(false);
-      }
+      assertTrue(hasInitException("QueryElevationComponent"));
     } finally {
       System.clearProperty("elevate.file");
       resetExceptionIgnores();
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentDistributedTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentDistributedTest.java
index 5f655e0..2f610fd 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentDistributedTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentDistributedTest.java
@@ -31,6 +31,7 @@
     handle.put("maxScore", SKIPVAL);
     handle.put("score", SKIPVAL);
     handle.put("[docid]", SKIPVAL);
+    handle.put("_version_", SKIPVAL); // not a cloud test, but may use updateLog
 
     // SOLR-3720: TODO: TVC doesn't "merge" df and idf .. should it?
     handle.put("df", SKIPVAL);
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
index dc0364d..34d88e9 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
@@ -32,6 +32,7 @@
 
   @BeforeClass
   public static void beforeTest() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
 
     assertNull(h.validateUpdate(adoc("id", "0", "lowerfilt", "a", "standardfilt", "a", "foo_i","1")));
diff --git a/solr/core/src/test/org/apache/solr/request/TestBinaryResponseWriter.java b/solr/core/src/test/org/apache/solr/request/TestBinaryResponseWriter.java
index 559e166..1b60aeb 100644
--- a/solr/core/src/test/org/apache/solr/request/TestBinaryResponseWriter.java
+++ b/solr/core/src/test/org/apache/solr/request/TestBinaryResponseWriter.java
@@ -45,6 +45,7 @@
   
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/response/TestCSVResponseWriter.java b/solr/core/src/test/org/apache/solr/response/TestCSVResponseWriter.java
index ebd765d..8a6b85a 100644
--- a/solr/core/src/test/org/apache/solr/response/TestCSVResponseWriter.java
+++ b/solr/core/src/test/org/apache/solr/response/TestCSVResponseWriter.java
@@ -32,6 +32,7 @@
 public class TestCSVResponseWriter extends SolrTestCaseJ4 {
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
     createIndex();
   }
diff --git a/solr/core/src/test/org/apache/solr/schema/NotRequiredUniqueKeyTest.java b/solr/core/src/test/org/apache/solr/schema/NotRequiredUniqueKeyTest.java
index 24f962d..f895f0d 100644
--- a/solr/core/src/test/org/apache/solr/schema/NotRequiredUniqueKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/NotRequiredUniqueKeyTest.java
@@ -32,6 +32,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // usecase doesn't work with updateLog
     initCore("solrconfig.xml","schema-not-required-unique-key.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java b/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
index 8415236..be28360 100644
--- a/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
@@ -40,6 +40,7 @@
   public void setUp()  throws Exception {
     super.setUp();
     // set some system properties for use by tests
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     System.setProperty("solr.test.sys.prop1", "propone");
     System.setProperty("solr.test.sys.prop2", "proptwo");
 
@@ -58,11 +59,9 @@
     TrieIntField ti;
     SortableIntField si;
     LongField l;
-    ShortField sf;
     FloatField f;
     DoubleField d;
     BoolField b;
-    ByteField bf;
     
     
     // ***********************
@@ -94,10 +93,6 @@
     l.init(schema, initMap);
     assertFalse(l.hasProperty(FieldType.OMIT_NORMS));
 
-    sf = new ShortField();
-    sf.init(schema, initMap);
-    assertFalse(sf.hasProperty(FieldType.OMIT_NORMS));
-
     f = new FloatField();
     f.init(schema, initMap);
     assertFalse(f.hasProperty(FieldType.OMIT_NORMS));
@@ -114,10 +109,6 @@
     b.init(schema, initMap);
     assertFalse(b.hasProperty(FieldType.OMIT_NORMS));
 
-    bf = new ByteField();
-    bf.init(schema, initMap);
-    assertFalse(bf.hasProperty(FieldType.OMIT_NORMS));
-
     // Non-primitive fields
     t = new TextField();
     t.init(schema, initMap);
@@ -156,10 +147,6 @@
     l.init(schema, initMap);
     assertTrue(l.hasProperty(FieldType.OMIT_NORMS));
 
-    sf = new ShortField();
-    sf.init(schema, initMap);
-    assertTrue(sf.hasProperty(FieldType.OMIT_NORMS));
-
     f = new FloatField();
     f.init(schema, initMap);
     assertTrue(f.hasProperty(FieldType.OMIT_NORMS));
@@ -176,10 +163,6 @@
     b.init(schema, initMap);
     assertTrue(b.hasProperty(FieldType.OMIT_NORMS));
 
-    bf = new ByteField();
-    bf.init(schema, initMap);
-    assertTrue(bf.hasProperty(FieldType.OMIT_NORMS));
-
     // Non-primitive fields
     t = new TextField();
     t.init(schema, initMap);
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCollationField.java b/solr/core/src/test/org/apache/solr/schema/TestCollationField.java
index 09c6dd7..08ea28a 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCollationField.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCollationField.java
@@ -74,6 +74,7 @@
     
     // copy over configuration files
     FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig-basic.xml"), new File(confDir, "solrconfig.xml"));
+    FileUtils.copyFile(getFile("solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml"), new File(confDir, "solrconfig.snippet.randomindexconfig.xml"));
     FileUtils.copyFile(getFile("solr/collection1/conf/schema-collate.xml"), new File(confDir, "schema.xml"));
     
     // generate custom collation rules (DIN 5007-2), saving to customrules.dat
diff --git a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
index 7fb8a31..8bc29c9 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
@@ -55,7 +55,9 @@
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig-mutable-managed-schema.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig-managed-schema.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig-basic.xml"), tmpConfDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, "solrconfig.snippet.randomindexconfig.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-one-field-no-dynamic-field.xml"), tmpConfDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-one-field-no-dynamic-field-unique-key.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-minimal.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema_codec.xml"), tmpConfDir);
     FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-bm25.xml"), tmpConfDir);
@@ -385,4 +387,39 @@
     assertQ(req(fieldName + ":thing"), "//*[@numFound='1']");
   }
 
+  public void testPersistUniqueKey() throws Exception {
+    assertSchemaResource(collection, "managed-schema");
+    deleteCore();
+    File managedSchemaFile = new File(tmpConfDir, "managed-schema");
+    assertTrue(managedSchemaFile.delete()); // Delete managed-schema so it won't block parsing a new schema
+    initCore("solrconfig-mutable-managed-schema.xml", "schema-one-field-no-dynamic-field-unique-key.xml", tmpSolrHome.getPath());
+
+    assertTrue(managedSchemaFile.exists());
+    String managedSchemaContents = FileUtils.readFileToString(managedSchemaFile, "UTF-8");
+    assertFalse(managedSchemaContents.contains("\"new_field\""));
+
+    Map<String,Object> options = new HashMap<String,Object>();
+    options.put("stored", "false");
+    IndexSchema oldSchema = h.getCore().getLatestSchema();
+    assertEquals("str", oldSchema.getUniqueKeyField().getName());
+    String fieldName = "new_field";
+    String fieldType = "string";
+    SchemaField newField = oldSchema.newField(fieldName, fieldType, options);
+    IndexSchema newSchema = oldSchema.addField(newField);
+    assertEquals("str", newSchema.getUniqueKeyField().getName());
+    h.getCore().setLatestSchema(newSchema);
+    log.info("####close harness");
+    h.close();
+    log.info("####close harness end");
+    initCore();
+
+    assertTrue(managedSchemaFile.exists());
+    FileInputStream stream = new FileInputStream(managedSchemaFile);
+    managedSchemaContents = IOUtils.toString(stream, "UTF-8");
+    stream.close(); // Explicitly close so that Windows can delete this file
+    assertTrue(managedSchemaContents.contains("<field name=\"new_field\" type=\"string\" stored=\"false\"/>"));
+    IndexSchema newNewSchema = h.getCore().getLatestSchema();
+    assertNotNull(newNewSchema.getUniqueKeyField());
+    assertEquals("str", newNewSchema.getUniqueKeyField().getName());
+  }
 }
diff --git a/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java b/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java
index 0d73990..87280b4 100644
--- a/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java
+++ b/solr/core/src/test/org/apache/solr/search/ReturnFieldsTest.java
@@ -47,6 +47,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
     String v = "how now brown cow";
     assertU(adoc("id","1", "text",v,  "text_np", v, "#foo_s", v));
diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
index bcfdb28..37f2247 100755
--- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
@@ -41,6 +41,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
     index();
   }
diff --git a/solr/core/src/test/org/apache/solr/search/TestFiltering.java b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
index cc71f82..b0a4bd8 100644
--- a/solr/core/src/test/org/apache/solr/search/TestFiltering.java
+++ b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
@@ -31,6 +31,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/search/TestFoldingMultitermQuery.java b/solr/core/src/test/org/apache/solr/search/TestFoldingMultitermQuery.java
index db800fc..08fcfa8 100644
--- a/solr/core/src/test/org/apache/solr/search/TestFoldingMultitermQuery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestFoldingMultitermQuery.java
@@ -46,8 +46,6 @@
           "float_f", num,
           "long_f", num,
           "double_f", num,
-          "byte_f", num,
-          "short_f", num,
           "bool_f", boolVal,
           "date_f", "200" + Integer.toString(i % 10) + "-01-01T00:00:00Z",
           "content", docs[i],
@@ -269,7 +267,7 @@
 
   @Test
   public void testNonTextTypes() {
-    String[] intTypes = {"int_f", "float_f", "long_f", "double_f", "byte_f", "short_f"};
+    String[] intTypes = {"int_f", "float_f", "long_f", "double_f"};
 
     for (String str : intTypes) {
       assertQ(req("q", str + ":" + "0"),
diff --git a/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java b/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
index be5da9a..f1da7c2 100644
--- a/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
+++ b/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
@@ -49,6 +49,7 @@
 
   @BeforeClass
   public static void beforeTests() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml","schema12.xml");
 
 
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index 5a8e8c0..4b5ed59 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -17,6 +17,9 @@
 package org.apache.solr.search;
 
 
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import org.apache.solr.common.SolrException;
 import org.noggit.ObjectBuilder;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.request.SolrQueryRequest;
@@ -41,8 +44,16 @@
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
-import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.update.UpdateHandler;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 public class TestRecovery extends SolrTestCaseJ4 {
 
@@ -487,7 +498,40 @@
   }
 
 
+
+  // We need to make sure that the update log is informed of a core reload.
   @Test
+  public void testReload() throws Exception {
+    long version = addAndGetVersion(sdoc("id","reload1") , null);
+
+    h.reload();
+
+    version = addAndGetVersion(sdoc("id","reload1", "_version_", Long.toString(version)), null);
+
+    assertU(commit());
+
+    // if we try the optimistic concurrency again, the tlog lookup maps should be clear
+    // and we should go to the index to check the version.  This indirectly tests that
+    // the update log was informed of the reload.  See SOLR-4858
+
+    version = addAndGetVersion(sdoc("id","reload1", "_version_", Long.toString(version)), null);
+
+    // a deleteByQuery currently forces open a new realtime reader via the update log.
+    // This also tests that the update log was informed of the new update handler.
+
+    deleteByQueryAndGetVersion("foo_t:hownowbrowncow", null);
+
+    version = addAndGetVersion(sdoc("id","reload1", "_version_", Long.toString(version)), null);
+
+    // if the update log was not informed of the new update handler, then the old core will
+    // incorrectly be used for some of the operations above and opened searchers
+    // will never be closed.  This used to cause the test framework to fail because of unclosed directory checks.
+    // SolrCore.openNewSearcher was modified to throw an error if the core is closed, resulting in
+    // a faster fail.
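+
+    // (Conversely, had the reload not been propagated, the stale tlog lookup maps
+    // could have returned an out-of-date version for "reload1" and the optimistic
+    // concurrency adds above would likely have failed with version conflicts.)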
+  }
+
+  @Test
   public void testBufferingFlags() throws Exception {
 
     DirectUpdateHandler2.commitOnClose = false;
@@ -710,16 +754,17 @@
       clearIndex();
       assertU(commit());
 
-      File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+      UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+      File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
 
       h.close();
 
-      String[] files = UpdateLog.getLogList(logDir);
+      String[] files = ulog.getLogList(logDir);
       for (String file : files) {
         new File(logDir, file).delete();
       }
 
-      assertEquals(0, UpdateLog.getLogList(logDir).length);
+      assertEquals(0, ulog.getLogList(logDir).length);
 
       createCore();
 
@@ -737,7 +782,7 @@
       assertU(commit());
       assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
 
-      assertEquals(2, UpdateLog.getLogList(logDir).length);
+      assertEquals(2, ulog.getLogList(logDir).length);
 
       addDocs(105, start, versions);  start+=105;
       assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
@@ -745,7 +790,7 @@
       assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
 
       // previous two logs should be gone now
-      assertEquals(1, UpdateLog.getLogList(logDir).length);
+      assertEquals(1, ulog.getLogList(logDir).length);
 
       addDocs(1, start, versions);  start+=1;
       h.close();
@@ -765,14 +810,14 @@
       assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
 
       // previous logs should be gone now
-      assertEquals(1, UpdateLog.getLogList(logDir).length);
+      assertEquals(1, ulog.getLogList(logDir).length);
 
       //
       // test that a corrupt tlog file doesn't stop us from coming up, or seeing versions before that tlog file.
       //
       addDocs(1, start, new LinkedList<Long>()); // don't add this to the versions list because we are going to lose it...
       h.close();
-      files = UpdateLog.getLogList(logDir);
+      files = ulog.getLogList(logDir);
       Arrays.sort(files);
       RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
       raf.writeChars("This is a trashed log file that really shouldn't work at all, but we'll see...");
@@ -820,7 +865,8 @@
         }
       };
 
-      File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+      UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+      File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
 
       clearIndex();
       assertU(commit());
@@ -830,7 +876,7 @@
       assertU(adoc("id","F3"));
 
       h.close();
-      String[] files = UpdateLog.getLogList(logDir);
+      String[] files = ulog.getLogList(logDir);
       Arrays.sort(files);
       RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
       raf.seek(raf.length());  // seek to end
@@ -874,7 +920,8 @@
     try {
       DirectUpdateHandler2.commitOnClose = false;
 
-      File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+      UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+      File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
 
       clearIndex();
       assertU(commit());
@@ -886,7 +933,7 @@
       h.close();
 
 
-      String[] files = UpdateLog.getLogList(logDir);
+      String[] files = ulog.getLogList(logDir);
       Arrays.sort(files);
       RandomAccessFile raf = new RandomAccessFile(new File(logDir, files[files.length-1]), "rw");
       long len = raf.length();
@@ -957,7 +1004,8 @@
         }
       };
 
-      File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+      UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+      File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
 
       clearIndex();
       assertU(commit());
@@ -967,7 +1015,7 @@
       assertU(adoc("id","CCCCCC"));
 
       h.close();
-      String[] files = UpdateLog.getLogList(logDir);
+      String[] files = ulog.getLogList(logDir);
       Arrays.sort(files);
       String fname = files[files.length-1];
       RandomAccessFile raf = new RandomAccessFile(new File(logDir, fname), "rw");
@@ -1037,17 +1085,18 @@
 
   // stops the core, removes the transaction logs, restarts the core.
   void deleteLogs() throws Exception {
-    File logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+    UpdateLog ulog = h.getCore().getUpdateHandler().getUpdateLog();
+    File logDir = new File(h.getCore().getUpdateHandler().getUpdateLog().getLogDir());
 
     h.close();
 
     try {
-      String[] files = UpdateLog.getLogList(logDir);
+      String[] files = ulog.getLogList(logDir);
       for (String file : files) {
         new File(logDir, file).delete();
       }
 
-      assertEquals(0, UpdateLog.getLogList(logDir).length);
+      assertEquals(0, ulog.getLogList(logDir).length);
     } finally {
       // make sure we create the core again, even if the assert fails so it won't mess
       // up the next test.
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
new file mode 100644
index 0000000..a786702
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
@@ -0,0 +1,1176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+
+import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.update.HdfsUpdateLog;
+import org.apache.solr.update.UpdateHandler;
+import org.apache.solr.update.UpdateLog;
+import org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.noggit.ObjectBuilder;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@ThreadLeakScope(Scope.NONE) // hdfs mini cluster currently leaks threads
+@ThreadLeakLingering(linger = 0)
+// TODO: longer term this should be combined with TestRecovery somehow
+public class TestRecoveryHdfs extends SolrTestCaseJ4 {
+
+  // means that we've seen the leader and have version info (i.e. we are a non-leader replica)
+  private static String FROM_LEADER = DistribPhase.FROMLEADER.toString(); 
+
+  private static int timeout = 60;  // acquire timeout in seconds. Change to a huge number when debugging to prevent threads from advancing.
+  
+  private static MiniDFSCluster dfsCluster;
+
+  private static String hdfsUri;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    dfsCluster = HdfsTestUtil.setupClass(new File(TEMP_DIR,
+        TestRecoveryHdfs.class.getName() + "_"
+            + System.currentTimeMillis()).getAbsolutePath());
+    hdfsUri = dfsCluster.getFileSystem().getUri().toString();
+    
+    hdfsDataDir = hdfsUri + "/solr/shard1";
+    System.setProperty("solr.data.dir", hdfsUri + "/solr/shard1");
+    System.setProperty("solr.ulog.dir", hdfsUri + "/solr/shard1");
+    
+    initCore("solrconfig-tlog.xml","schema15.xml");
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    System.clearProperty("solr.ulog.dir");
+    System.clearProperty("solr.data.dir");
+    System.clearProperty("test.build.data");
+    System.clearProperty("test.cache.data");
+    hdfsDataDir = null;
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+    FileSystem.closeAll();
+  }
+
+  // Since we make up fake versions in these tests, a delete-by-query with a real version can mess
+  // us up, because Solr may then think the following updates were reordered.
+  @Override
+  public void clearIndex() {
+    try {
+      deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), DISTRIB_UPDATE_PARAM,FROM_LEADER));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+
+  @Test
+  public void testLogReplay() throws Exception {
+    try {
+
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
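+      // Test hooks: replay blocks until permits are released on logReplay, and logReplayFinish is released once replay completes.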
+      UpdateLog.testing_logReplayHook = new Runnable() {
+        @Override
+        public void run() {
+          try {
+            assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = new Runnable() {
+        @Override
+        public void run() {
+          logReplayFinish.release();
+        }
+      };
+
+
+      clearIndex();
+      assertU(commit());
+
+      Deque<Long> versions = new ArrayDeque<Long>();
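+      // addFirst keeps the newest version at the head, matching the order /get getVersions returns.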
+      versions.addFirst(addAndGetVersion(sdoc("id", "A1"), null));
+      versions.addFirst(addAndGetVersion(sdoc("id", "A11"), null));
+      versions.addFirst(addAndGetVersion(sdoc("id", "A12"), null));
+      versions.addFirst(deleteByQueryAndGetVersion("id:A11", null));
+      versions.addFirst(addAndGetVersion(sdoc("id", "A13"), null));
+
+      assertJQ(req("q","*:*"),"/response/numFound==0");
+
+      assertJQ(req("qt","/get", "getVersions",""+versions.size()) ,"/versions==" + versions);
+
+      h.close();
+      createCore();
+      // Solr should kick this off now
+      // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
+
+      // verify that previous close didn't do a commit
+      // recovery should be blocked by our hook
+      assertJQ(req("q","*:*") ,"/response/numFound==0");
+
+      // make sure we can still access versions after a restart
+      assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions);
+
+      // unblock recovery
+      logReplay.release(1000);
+
+      // make sure we can still access versions during recovery
+      assertJQ(req("qt","/get", "getVersions",""+versions.size()),"/versions==" + versions);
+
+      // wait until recovery has finished
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+
+      assertJQ(req("q","*:*") ,"/response/numFound==3");
+
+      // make sure we can still access versions after recovery
+      assertJQ(req("qt","/get", "getVersions",""+versions.size()) ,"/versions==" + versions);
+
+      assertU(adoc("id","A2"));
+      assertU(adoc("id","A3"));
+      assertU(delI("A2"));
+      assertU(adoc("id","A4"));
+
+      assertJQ(req("q","*:*") ,"/response/numFound==3");
+
+      h.close();
+      createCore();
+      // Solr should kick this off now
+      // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
+
+      // wait until recovery has finished
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+      assertJQ(req("q","*:*") ,"/response/numFound==5");
+      assertJQ(req("q","id:A2") ,"/response/numFound==0");
+
+      // no updates, so ensure that recovery does not run
+      h.close();
+      int permits = logReplay.availablePermits();
+      createCore();
+      // Solr should kick this off now
+      // h.getCore().getUpdateHandler().getUpdateLog().recoverFromLog();
+
+      assertJQ(req("q","*:*") ,"/response/numFound==5");
+      Thread.sleep(100);
+      assertEquals(permits, logReplay.availablePermits()); // no updates, so ensure that recovery didn't run
+
+      assertEquals(UpdateLog.State.ACTIVE, h.getCore().getUpdateHandler().getUpdateLog().getState());
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+
+  }
+
+  @Test
+  public void testBuffering() throws Exception {
+
+    DirectUpdateHandler2.commitOnClose = false;
+    final Semaphore logReplay = new Semaphore(0);
+    final Semaphore logReplayFinish = new Semaphore(0);
+
+    UpdateLog.testing_logReplayHook = new Runnable() {
+      @Override
+      public void run() {
+        try {
+          assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    };
+
+    UpdateLog.testing_logReplayFinishHook = new Runnable() {
+      @Override
+      public void run() {
+        logReplayFinish.release();
+      }
+    };
+
+
+    SolrQueryRequest req = req();
+    UpdateHandler uhandler = req.getCore().getUpdateHandler();
+    UpdateLog ulog = uhandler.getUpdateLog();
+
+    try {
+      clearIndex();
+      assertU(commit());
+
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+      Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
+      assertNull(rinfoFuture);
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
+      // simulate updates from a leader
+      updateJ(jsonAdd(sdoc("id","B1", "_version_","1010")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","B11", "_version_","1015")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonDelQ("id:B1 id:B11 id:B2 id:B3"), params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-1017"));
+      updateJ(jsonAdd(sdoc("id","B2", "_version_","1020")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","B3", "_version_","1030")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      deleteAndGetVersion("B1", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-2010"));
+
+      assertJQ(req("qt","/get", "getVersions","6")
+          ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}"
+      );
+
+      assertU(commit());
+
+      assertJQ(req("qt","/get", "getVersions","6")
+          ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}"
+      );
+
+      // updates should be buffered, so we should not see any results yet.
+      assertJQ(req("q", "*:*")
+          , "/response/numFound==0"
+      );
+
+      // real-time get should also not show anything (this could change in the future,
+      // but it's currently used for validating version numbers too, so it would
+      // be bad for updates to be visible if we're just buffering.)
+      assertJQ(req("qt","/get", "id","B3")
+          ,"=={'doc':null}"
+      );
+
+
+      rinfoFuture = ulog.applyBufferedUpdates();
+      assertNotNull(rinfoFuture);
+
+      assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
+
+      logReplay.release(1000);
+
+      UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+
+
+      assertJQ(req("qt","/get", "getVersions","6")
+          ,"=={'versions':[-2010,1030,1020,-1017,1015,1010]}"
+      );
+
+
+      assertJQ(req("q", "*:*")
+          , "/response/numFound==2"
+      );
+
+      // move back to recovering
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
+      Long ver = getVer(req("qt","/get", "id","B3"));
+      assertEquals(1030L, ver.longValue());
+
+      // add a reordered doc that shouldn't overwrite one in the index
+      updateJ(jsonAdd(sdoc("id","B3", "_version_","3")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      // reorder two buffered updates
+      updateJ(jsonAdd(sdoc("id","B4", "_version_","1040")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      deleteAndGetVersion("B4", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-940"));   // this update should not take affect
+      updateJ(jsonAdd(sdoc("id","B6", "_version_","1060")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","B5", "_version_","1050")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","B8", "_version_","1080")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      // test that delete by query is at least buffered along with everything else so it will delete the
+      // currently buffered id:B8 (even if it doesn't currently support versioning)
+      updateJ("{\"delete\": { \"query\":\"id:B2 OR id:B8\" }}", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-3000"));
+
+      assertJQ(req("qt","/get", "getVersions","13")
+          ,"=={'versions':[-3000,1080,1050,1060,-940,1040,3,-2010,1030,1020,-1017,1015,1010]}"  // the "3" appears because versions aren't checked while buffering
+      );
+
+      logReplay.drainPermits();
+      rinfoFuture = ulog.applyBufferedUpdates();
+      assertNotNull(rinfoFuture);
+      assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());
+
+      // apply a single update
+      logReplay.release(1);
+
+      // now add another update
+      updateJ(jsonAdd(sdoc("id","B7", "_version_","1070")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      // a reordered update that should be dropped
+      deleteAndGetVersion("B5", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-950"));
+
+      deleteAndGetVersion("B6", params(DISTRIB_UPDATE_PARAM,FROM_LEADER, "_version_","-2060"));
+
+      logReplay.release(1000);
+      UpdateLog.RecoveryInfo recInfo = rinfoFuture.get();
+
+      assertJQ(req("q", "*:*", "sort","id asc", "fl","id,_version_")
+          , "/response/docs==["
+                           + "{'id':'B3','_version_':1030}"
+                           + ",{'id':'B4','_version_':1040}"
+                           + ",{'id':'B5','_version_':1050}"
+                           + ",{'id':'B7','_version_':1070}"
+                           +"]"
+      );
+
+      assertEquals(1, recInfo.deleteByQuery);
+
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+
+      req().close();
+    }
+
+  }
+
+
+  @Test
+  @Ignore("HDFS-3107: no truncate support yet")
+  public void testDropBuffered() throws Exception {
+
+    DirectUpdateHandler2.commitOnClose = false;
+    final Semaphore logReplay = new Semaphore(0);
+    final Semaphore logReplayFinish = new Semaphore(0);
+
+    UpdateLog.testing_logReplayHook = new Runnable() {
+      @Override
+      public void run() {
+        try {
+          assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    };
+
+    UpdateLog.testing_logReplayFinishHook = new Runnable() {
+      @Override
+      public void run() {
+        logReplayFinish.release();
+      }
+    };
+
+
+    SolrQueryRequest req = req();
+    UpdateHandler uhandler = req.getCore().getUpdateHandler();
+    UpdateLog ulog = uhandler.getUpdateLog();
+
+    try {
+      clearIndex();
+      assertU(commit());
+
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+      Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();
+      assertNull(rinfoFuture);
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
+      // simulate updates from a leader
+      updateJ(jsonAdd(sdoc("id","C1", "_version_","101")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C2", "_version_","102")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C3", "_version_","103")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      assertTrue(ulog.dropBufferedUpdates());
+      ulog.bufferUpdates();
+      updateJ(jsonAdd(sdoc("id", "C4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id", "C5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      logReplay.release(1000);
+      rinfoFuture = ulog.applyBufferedUpdates();
+      UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
+      assertEquals(2, rinfo.adds);
+
+      assertJQ(req("qt","/get", "getVersions","2")
+          ,"=={'versions':[105,104]}"
+      );
+
+      // this time add some docs first before buffering starts (so tlog won't be at pos 0)
+      updateJ(jsonAdd(sdoc("id","C100", "_version_","200")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C101", "_version_","201")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      ulog.bufferUpdates();
+      updateJ(jsonAdd(sdoc("id","C103", "_version_","203")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C104", "_version_","204")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      assertTrue(ulog.dropBufferedUpdates());
+      ulog.bufferUpdates();
+      updateJ(jsonAdd(sdoc("id","C105", "_version_","205")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C106", "_version_","206")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      rinfoFuture = ulog.applyBufferedUpdates();
+      rinfo = rinfoFuture.get();
+      assertEquals(2, rinfo.adds);
+
+      assertJQ(req("q", "*:*", "sort","_version_ asc", "fl","id,_version_")
+          , "/response/docs==["
+          + "{'id':'C4','_version_':104}"
+          + ",{'id':'C5','_version_':105}"
+          + ",{'id':'C100','_version_':200}"
+          + ",{'id':'C101','_version_':201}"
+          + ",{'id':'C105','_version_':205}"
+          + ",{'id':'C106','_version_':206}"
+          +"]"
+      );
+
+      assertJQ(req("qt","/get", "getVersions","6")
+          ,"=={'versions':[206,205,201,200,105,104]}"
+      );
+
+      ulog.bufferUpdates();
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+      updateJ(jsonAdd(sdoc("id","C301", "_version_","998")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C302", "_version_","999")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      assertTrue(ulog.dropBufferedUpdates());
+
+      // make sure we can overwrite with a lower version
+      // TODO: is this functionality needed?
+      updateJ(jsonAdd(sdoc("id","C301", "_version_","301")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","C302", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      assertU(commit());
+
+      assertJQ(req("qt","/get", "getVersions","2")
+          ,"=={'versions':[302,301]}"
+      );
+
+      assertJQ(req("q", "*:*", "sort","_version_ desc", "fl","id,_version_", "rows","2")
+          , "/response/docs==["
+          + "{'id':'C302','_version_':302}"
+          + ",{'id':'C301','_version_':301}"
+          +"]"
+      );
+
+
+      updateJ(jsonAdd(sdoc("id","C2", "_version_","302")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+
+      req().close();
+    }
+
+  }
+
+
+  @Test
+  public void testBufferingFlags() throws Exception {
+
+    DirectUpdateHandler2.commitOnClose = false;
+    final Semaphore logReplayFinish = new Semaphore(0);
+
+    UpdateLog.testing_logReplayFinishHook = new Runnable() {
+      @Override
+      public void run() {
+        logReplayFinish.release();
+      }
+    };
+
+
+    SolrQueryRequest req = req();
+    UpdateHandler uhandler = req.getCore().getUpdateHandler();
+    UpdateLog ulog = uhandler.getUpdateLog();
+
+    try {
+      clearIndex();
+      assertU(commit());
+
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState());
+      ulog.bufferUpdates();
+
+      // simulate updates from a leader
+      updateJ(jsonAdd(sdoc("id","Q1", "_version_","101")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","Q2", "_version_","102")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","Q3", "_version_","103")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      assertEquals(UpdateLog.State.BUFFERING, ulog.getState());
+
+      req.close();
+      h.close();
+      createCore();
+
+      req = req();
+      uhandler = req.getCore().getUpdateHandler();
+      ulog = uhandler.getUpdateLog();
+
+      logReplayFinish.acquire();  // wait for replay to finish
+
+      assertTrue((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) != 0);   // since we died while buffering, the last log should be marked with a gap
+
+      //
+      // Try again to ensure that the previous log replay didn't wipe out our flags
+      //
+
+      req.close();
+      h.close();
+      createCore();
+
+      req = req();
+      uhandler = req.getCore().getUpdateHandler();
+      ulog = uhandler.getUpdateLog();
+
+      assertTrue((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) != 0);
+
+      // now do some normal non-buffered adds
+      updateJ(jsonAdd(sdoc("id","Q4", "_version_","114")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","Q5", "_version_","115")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","Q6", "_version_","116")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      assertU(commit());
+
+      req.close();
+      h.close();
+      createCore();
+
+      req = req();
+      uhandler = req.getCore().getUpdateHandler();
+      ulog = uhandler.getUpdateLog();
+
+      assertTrue((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) == 0);
+
+      ulog.bufferUpdates();
+      // simulate receiving no updates
+      ulog.applyBufferedUpdates();
+      updateJ(jsonAdd(sdoc("id","Q7", "_version_","117")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER)); // do another add to make sure flags are back to normal
+
+      req.close();
+      h.close();
+      createCore();
+
+      req = req();
+      uhandler = req.getCore().getUpdateHandler();
+      ulog = uhandler.getUpdateLog();
+
+      assertTrue((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) == 0); // check flags on Q7
+
+      logReplayFinish.acquire();
+      assertEquals(UpdateLog.State.ACTIVE, ulog.getState()); // leave each test method in a good state
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+
+      req().close();
+    }
+
+  }
+
+
+
+  // make sure that on a restart, versions don't start too low
+  @Test
+  public void testVersionsOnRestart() throws Exception {
+    clearIndex();
+    assertU(commit());
+
+    assertU(adoc("id","D1", "val_i","1"));
+    assertU(adoc("id","D2", "val_i","1"));
+    assertU(commit());
+    long v1 = getVer(req("q","id:D1"));
+    long v1a = getVer(req("q","id:D2"));
+
+    h.close();
+    createCore();
+
+    assertU(adoc("id","D1", "val_i","2"));
+    assertU(commit());
+    long v2 = getVer(req("q","id:D1"));
+
+    assertTrue(v2 > v1);
+
+    assertJQ(req("qt","/get", "getVersions","2")
+        ,"/versions==[" + v2 + "," + v1a + "]"
+    );
+
+  }
+
+  // make sure that log isn't needlessly replayed after a clean shutdown
+  @Test
+  public void testCleanShutdown() throws Exception {
+    DirectUpdateHandler2.commitOnClose = true;
+    final Semaphore logReplay = new Semaphore(0);
+    final Semaphore logReplayFinish = new Semaphore(0);
+
+    UpdateLog.testing_logReplayHook = new Runnable() {
+      @Override
+      public void run() {
+        try {
+          assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    };
+
+    UpdateLog.testing_logReplayFinishHook = new Runnable() {
+      @Override
+      public void run() {
+        logReplayFinish.release();
+      }
+    };
+
+
+    SolrQueryRequest req = req();
+    UpdateHandler uhandler = req.getCore().getUpdateHandler();
+    UpdateLog ulog = uhandler.getUpdateLog();
+
+    try {
+      clearIndex();
+      assertU(commit());
+
+      assertU(adoc("id","E1", "val_i","1"));
+      assertU(adoc("id","E2", "val_i","1"));
+
+      // set to a high enough number so this test won't hang on a bug
+      logReplay.release(10);
+
+      h.close();
+      createCore();
+
+      // make sure the docs got committed
+      assertJQ(req("q","*:*"),"/response/numFound==2");
+
+      // make sure no replay happened
+      assertEquals(10, logReplay.availablePermits());
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+
+      req().close();
+    }
+  }
+  
+  
+  private void addDocs(int nDocs, int start, LinkedList<Long> versions) throws Exception {
+    for (int i=0; i<nDocs; i++) {
+      versions.addFirst( addAndGetVersion( sdoc("id",Integer.toString(start + i)) , null) );
+    }
+  }
+
+  @Test
+  public void testRemoveOldLogs() throws Exception {
+    try {
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
+      UpdateLog.testing_logReplayHook = new Runnable() {
+        @Override
+        public void run() {
+          try {
+            assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = new Runnable() {
+        @Override
+        public void run() {
+          logReplayFinish.release();
+        }
+      };
+
+
+      clearIndex();
+      assertU(commit());
+
+      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+
+      Configuration conf = new Configuration();
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
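+      // Disable the HDFS FileSystem cache so this test gets a private instance, independent of any cached one.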
+      FileSystem fs;
+      try {
+        URI uri = new URI(hdfsUri);
+        fs = FileSystem.newInstance(uri, conf);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      } catch (URISyntaxException e) {
+        throw new RuntimeException(e);
+      }
+      
+      h.close();
+
+      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      for (String file : files) {
+        fs.delete(new Path(logDir, file), false);
+      }
+
+      assertEquals(0, HdfsUpdateLog.getLogList(fs, new Path(logDir)).length);
+
+      createCore();
+
+      int start = 0;
+      int maxReq = 50;
+
+      LinkedList<Long> versions = new LinkedList<Long>();
+      addDocs(10, start, versions); start+=10;
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+      assertU(commit());
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      addDocs(10, start, versions);  start+=10;
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+      assertU(commit());
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      assertEquals(2, HdfsUpdateLog.getLogList(fs, new Path(logDir)).length);
+
+      addDocs(105, start, versions);  start+=105;
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+      assertU(commit());
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      // previous two logs should be gone now
+      assertEquals(1, HdfsUpdateLog.getLogList(fs, new Path(logDir)).length);
+
+      addDocs(1, start, versions);  start+=1;
+      h.close();
+      createCore();      // trigger recovery, make sure that tlog reference handling is correct
+
+      // test we can get versions while replay is happening
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      logReplay.release(1000);
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      addDocs(105, start, versions);  start+=105;
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+      assertU(commit());
+      assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
+
+      // previous logs should be gone now
+      assertEquals(1, HdfsUpdateLog.getLogList(fs, new Path(logDir)).length);
+
+      //
+      // test that a corrupt tlog file doesn't stop us from coming up, or seeing versions before that tlog file.
+      //
+      addDocs(1, start, new LinkedList<Long>()); // don't add this to the versions list because we are going to lose it...
+      h.close();
+      files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      Arrays.sort(files);
+
+      FSDataOutputStream dos = fs.create(new Path(logDir, files[files.length-1]), (short)1);
+      dos.writeUTF("This is a trashed log file that really shouldn't work at all, but we'll see..");
+      dos.close();
+
+      ignoreException("Failure to open existing");
+      createCore();
+      // we should still be able to get the list of versions (not including the trashed log file)
+      assertJQ(req("qt", "/get", "getVersions", "" + maxReq), "/versions==" + versions.subList(0, Math.min(maxReq, start)));
+      resetExceptionIgnores();
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+  }
+
+  //
+  // test that a partially written last tlog entry (that will cause problems for both reverse reading and for
+  // log replay) doesn't stop us from coming up, and from recovering the documents that were not cut off.
+  //
+
+  @Test
+  public void testTruncatedLog() throws Exception {
+    try {
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
+      UpdateLog.testing_logReplayHook = new Runnable() {
+        @Override
+        public void run() {
+          try {
+            assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = new Runnable() {
+        @Override
+        public void run() {
+          logReplayFinish.release();
+        }
+      };
+
+      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+
+      clearIndex();
+      assertU(commit());
+
+      assertU(adoc("id","F1"));
+      assertU(adoc("id","F2"));
+      assertU(adoc("id","F3"));
+
+      Configuration conf = new Configuration();
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      FileSystem fs;
+      try {
+        URI uri = new URI(hdfsUri);
+        fs = FileSystem.newInstance(uri, conf);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      } catch (URISyntaxException e) {
+        throw new RuntimeException(e);
+      }
+      
+      h.close();
+
+      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      Arrays.sort(files);
+
+      FSDataOutputStream dos = fs.append(new Path(logDir, files[files.length-1]));
+      dos.writeLong(0xffffffffffffffffL);
+      dos.writeChars("This should be appended to a good log file, representing a bad partially written record.");
+      dos.close();
+
+      logReplay.release(1000);
+      logReplayFinish.drainPermits();
+      ignoreException("OutOfBoundsException");  // this is what the corrupted log currently produces... subject to change.
+      createCore();
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+      resetExceptionIgnores();
+      assertJQ(req("q","*:*") ,"/response/numFound==3");
+
+      //
+      // Now test that the bad log file doesn't mess up retrieving latest versions
+      //
+
+      updateJ(jsonAdd(sdoc("id","F4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","F5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","F6", "_version_","106")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      // This currently skips the bad log file and also returns the version of the clearIndex (del *:*)
+      // assertJQ(req("qt","/get", "getVersions","6"), "/versions==[106,105,104]");
+      assertJQ(req("qt","/get", "getVersions","3"), "/versions==[106,105,104]");
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+  }
+
+
+  //
+  // test that a corrupt tlog doesn't stop us from coming up
+  //
+  @Test
+  public void testCorruptLog() throws Exception {
+    try {
+      DirectUpdateHandler2.commitOnClose = false;
+
+      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+ 
+      clearIndex();
+      assertU(commit());
+
+      assertU(adoc("id","G1"));
+      assertU(adoc("id","G2"));
+      assertU(adoc("id","G3"));
+
+      h.close();
+      Configuration conf = new Configuration();
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      FileSystem fs;
+      try {
+        URI uri = new URI(hdfsUri);
+        fs = FileSystem.newInstance(uri, conf);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      } catch (URISyntaxException e) {
+        throw new RuntimeException(e);
+      }
+
+      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      Arrays.sort(files);
+
+      FSDataOutputStream dos = fs.create(new Path(logDir, files[files.length-1]), (short)1);
+      dos.write(new byte[800]);  // zero out file
+      dos.close();
+
+
+      ignoreException("Failure to open existing log file");  // this is what the corrupted log currently produces... subject to change.
+      createCore();
+      resetExceptionIgnores();
+
+      // just make sure it responds
+      assertJQ(req("q","*:*") ,"/response/numFound==0");
+
+      //
+      // Now test that the bad log file doesn't mess up retrieving latest versions
+      //
+
+      updateJ(jsonAdd(sdoc("id","G4", "_version_","104")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","G5", "_version_","105")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+      updateJ(jsonAdd(sdoc("id","G6", "_version_","106")), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+
+      // This currently skips the bad log file and also returns the version of the clearIndex (del *:*)
+      // assertJQ(req("qt","/get", "getVersions","6"), "/versions==[106,105,104]");
+      assertJQ(req("qt","/get", "getVersions","3"), "/versions==[106,105,104]");
+
+      assertU(commit());
+
+      assertJQ(req("q","*:*") ,"/response/numFound==3");
+
+      // This messes up some other tests (on windows) if we don't remove the bad log.
+      // This *should* hopefully just be because the tests are too fragile and not because of real bugs - but it should be investigated further.
+      deleteLogs();
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+  }
+
+
+
+  // in rare circumstances, two logs can be left uncapped (lacking a commit at the end signifying that all the content in the log was committed)
+  @Test
+  public void testRecoveryMultipleLogs() throws Exception {
+    try {
+      DirectUpdateHandler2.commitOnClose = false;
+      final Semaphore logReplay = new Semaphore(0);
+      final Semaphore logReplayFinish = new Semaphore(0);
+
+      UpdateLog.testing_logReplayHook = new Runnable() {
+        @Override
+        public void run() {
+          try {
+            assertTrue(logReplay.tryAcquire(timeout, TimeUnit.SECONDS));
+          } catch (Exception e) {
+            throw new RuntimeException(e);
+          }
+        }
+      };
+
+      UpdateLog.testing_logReplayFinishHook = new Runnable() {
+        @Override
+        public void run() {
+          logReplayFinish.release();
+        }
+      };
+
+      String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+      Configuration conf = new Configuration();
+      conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      FileSystem fs;
+      try {
+        URI uri = new URI(hdfsUri);
+        fs = FileSystem.newInstance(uri, conf);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      } catch (URISyntaxException e) {
+        throw new RuntimeException(e);
+      }
+      clearIndex();
+      assertU(commit());
+
+      assertU(adoc("id","AAAAAA"));
+      assertU(adoc("id","BBBBBB"));
+      assertU(adoc("id","CCCCCC"));
+
+      h.close();
+      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      Arrays.sort(files);
+      String fname = files[files.length-1];
+
+      FSDataOutputStream dos = fs.append(new Path(logDir, fname));
+      dos.writeLong(0xffffffffffffffffL);
+      dos.writeChars("This should be appended to a good log file, representing a bad partially written record.");
+      dos.close();
+      
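+      // Read back the whole (now corrupted) log so we can clone it as a second uncapped log below.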
+      FSDataInputStream dis = fs.open(new Path(logDir, fname));
+      byte[] content = new byte[dis.available()];
+
+      dis.readFully(content);
+
+      dis.close();
+
+      // Now make a newer log file with just the IDs changed.  NOTE: this may not work if log format changes too much!
+      findReplace("AAAAAA".getBytes("UTF-8"), "aaaaaa".getBytes("UTF-8"), content);
+      findReplace("BBBBBB".getBytes("UTF-8"), "bbbbbb".getBytes("UTF-8"), content);
+      findReplace("CCCCCC".getBytes("UTF-8"), "cccccc".getBytes("UTF-8"), content);
+
+      // WARNING... assumes format of .00000n where n is less than 9
+      long logNumber = Long.parseLong(fname.substring(fname.lastIndexOf(".") + 1));
+      String fname2 = String.format(Locale.ROOT,
+          UpdateLog.LOG_FILENAME_PATTERN,
+          UpdateLog.TLOG_NAME,
+          logNumber + 1);
+      
+      dos = fs.create(new Path(logDir, fname2), (short)1);
+      dos.write(content);
+      dos.close();
+      
+
+      logReplay.release(1000);
+      logReplayFinish.drainPermits();
+      ignoreException("OutOfBoundsException");  // this is what the corrupted log currently produces... subject to change.
+      createCore();
+      assertTrue(logReplayFinish.tryAcquire(timeout, TimeUnit.SECONDS));
+      resetExceptionIgnores();
+      assertJQ(req("q","*:*") ,"/response/numFound==6");
+
+    } finally {
+      DirectUpdateHandler2.commitOnClose = true;
+      UpdateLog.testing_logReplayHook = null;
+      UpdateLog.testing_logReplayFinishHook = null;
+    }
+  }
+
+
+  // NOTE: replacement must currently be same size
+  private static void findReplace(byte[] from, byte[] to, byte[] data) {
+    int idx = -from.length;
+    for(;;) {
+      idx = indexOf(from, data, idx + from.length);  // skip over previous match
+      if (idx < 0) break;
+      for (int i=0; i<to.length; i++) {
+        data[idx+i] = to[i];
+      }
+    }
+  }
+  
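+  // Naive byte-array search: returns the index of the first occurrence of target at or after start, or -1.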
+  private static int indexOf(byte[] target, byte[] data, int start) {
+    outer: for (int i=start; i<=data.length - target.length; i++) {
+      for (int j=0; j<target.length; j++) {
+        if (data[i+j] != target[j]) continue outer;
+      }
+      return i;
+    }
+    return -1;
+  }
+
+  // stops the core, removes the transaction logs, restarts the core.
+  void deleteLogs() throws Exception {
+    String logDir = h.getCore().getUpdateHandler().getUpdateLog().getLogDir();
+    Configuration conf = new Configuration();
+    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
+    FileSystem fs;
+    try {
+      URI uri = new URI(hdfsUri);
+      fs = FileSystem.newInstance(uri, conf);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    } catch (URISyntaxException e) {
+      throw new RuntimeException(e);
+    }
+    h.close();
+
+    try {
+      String[] files = HdfsUpdateLog.getLogList(fs, new Path(logDir));
+      for (String file : files) {
+        //new File(logDir, file).delete();
+        fs.delete(new Path(logDir, file), false);
+      }
+
+      assertEquals(0, HdfsUpdateLog.getLogList(fs, new Path(logDir)).length);
+    } finally {
+      // make sure we create the core again, even if the assert fails so it won't mess
+      // up the next test.
+      createCore();
+      assertJQ(req("q","*:*") ,"/response/numFound==");   // ensure it works
+    }
+  }
+
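+  // Pulls _version_ out of a /get or search JSON response, handling the doc, docs, and response shapes.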
+  private static Long getVer(SolrQueryRequest req) throws Exception {
+    String response = JQ(req);
+    Map rsp = (Map) ObjectBuilder.fromJSON(response);
+    Map doc = null;
+    if (rsp.containsKey("doc")) {
+      doc = (Map)rsp.get("doc");
+    } else if (rsp.containsKey("docs")) {
+      List lst = (List)rsp.get("docs");
+      if (lst.size() > 0) {
+        doc = (Map)lst.get(0);
+      }
+    } else if (rsp.containsKey("response")) {
+      Map responseMap = (Map)rsp.get("response");
+      List lst = (List)responseMap.get("docs");
+      if (lst.size() > 0) {
+        doc = (Map)lst.get(0);
+      }
+    }
+
+    if (doc == null) return null;
+
+    return (Long)doc.get("_version_");
+  }
+}
+
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index 114b1c8..44e09bf 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@ -25,6 +25,7 @@
 public class TestSolrQueryParser extends SolrTestCaseJ4 {
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
     createIndex();
   }
diff --git a/solr/core/src/test/org/apache/solr/search/TestValueSourceCache.java b/solr/core/src/test/org/apache/solr/search/TestValueSourceCache.java
index b6da591..57c4064 100644
--- a/solr/core/src/test/org/apache/solr/search/TestValueSourceCache.java
+++ b/solr/core/src/test/org/apache/solr/search/TestValueSourceCache.java
@@ -78,8 +78,6 @@
         "val1_i1",
         "val1_l1",
         "val1_b1",
-        "val1_by1",
-        "val1_sh1"
     };
     for (String template : templates) {
       for (String nums : numbers) {
diff --git a/solr/core/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java b/solr/core/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java
index bc2764b..65808bd 100644
--- a/solr/core/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java
+++ b/solr/core/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java
@@ -30,6 +30,7 @@
 public class DistanceFunctionTest extends SolrTestCaseJ4 {
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java b/solr/core/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java
new file mode 100644
index 0000000..70fd813
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/store/blockcache/BlockCacheTest.java
@@ -0,0 +1,110 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+public class BlockCacheTest extends LuceneTestCase {
+  @Test
+  public void testBlockCache() {
+    int blocksInTest = 2000000;
+    int blockSize = 1024;
+    
+    int slabSize = blockSize * 4096;
+    long totalMemory = 2 * slabSize;
+    
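+    // Two slabs of 4096 blocks each: only 8192 of the 2M test blocks fit, so evictions and cache misses are expected.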
+    BlockCache blockCache = new BlockCache(new Metrics(new Configuration()), true, totalMemory, slabSize, blockSize);
+    byte[] buffer = new byte[1024];
+    Random random = random();
+    byte[] newData = new byte[blockSize];
+    AtomicLong hitsInCache = new AtomicLong();
+    AtomicLong missesInCache = new AtomicLong();
+    long storeTime = 0;
+    long fetchTime = 0;
+    int passes = 10000;
+
+    BlockCacheKey blockCacheKey = new BlockCacheKey();
+
+    for (int j = 0; j < passes; j++) {
+      long block = random.nextInt(blocksInTest);
+      int file = 0;
+      blockCacheKey.setBlock(block);
+      blockCacheKey.setFile(file);
+
+      if (blockCache.fetch(blockCacheKey, buffer)) {
+        hitsInCache.incrementAndGet();
+      } else {
+        missesInCache.incrementAndGet();
+      }
+
+      byte[] testData = testData(random, newData);
+      long t1 = System.nanoTime();
+      blockCache.store(blockCacheKey, 0, testData, 0, blockSize);
+      storeTime += (System.nanoTime() - t1);
+
+      long t3 = System.nanoTime();
+      if (blockCache.fetch(blockCacheKey, buffer)) {
+        fetchTime += (System.nanoTime() - t3);
+        assertTrue(Arrays.equals(testData, buffer));
+      }
+    }
+    System.out.println("Cache Hits    = " + hitsInCache.get());
+    System.out.println("Cache Misses  = " + missesInCache.get());
+    System.out.println("Store         = " + (storeTime / (double) passes) / 1000000.0);
+    System.out.println("Fetch         = " + (fetchTime / (double) passes) / 1000000.0);
+    System.out.println("# of Elements = " + blockCache.getSize());
+  }
+
+  /**
+   * Verify checking of buffer size limits against the cached block size.
+   */
+  @Test
+  public void testLongBuffer() {
+    Random random = random();
+    int blockSize = BlockCache._32K;
+    int slabSize = blockSize * 1024;
+    long totalMemory = 2 * slabSize;
+
+    BlockCache blockCache = new BlockCache(new Metrics(new Configuration()),
+        true, totalMemory, slabSize);
+    BlockCacheKey blockCacheKey = new BlockCacheKey();
+    blockCacheKey.setBlock(0);
+    blockCacheKey.setFile(0);
+    byte[] newData = new byte[blockSize*3];
+    byte[] testData = testData(random, newData);
+
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, 0, blockSize));
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, blockSize, blockSize));
+    assertTrue(blockCache.store(blockCacheKey, 0, testData, blockSize*2, blockSize));
+
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, 0, blockSize - 1));
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, blockSize, blockSize - 1));
+    assertTrue(blockCache.store(blockCacheKey, 1, testData, blockSize*2, blockSize - 1));
+  }
+
+  private static byte[] testData(Random random, byte[] buf) {
+    random.nextBytes(buf);
+    return buf;
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
new file mode 100644
index 0000000..5d0445c
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
@@ -0,0 +1,257 @@
+package org.apache.solr.store.blockcache;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MergeInfo;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.store.hdfs.HdfsDirectory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+
+public class BlockDirectoryTest extends LuceneTestCase {
+
+  private class MapperCache implements Cache {
+    public Map<String, byte[]> map = new ConcurrentLinkedHashMap.Builder<String, byte[]>().maximumWeightedCapacity(8).build();
+
+    @Override
+    public void update(String name, long blockId, int blockOffset, byte[] buffer, int offset, int length) {
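+      // If the block is already cached, grow the buffer as needed and overlay the new bytes; otherwise copy into a fresh one.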
+      byte[] cached = map.get(name + blockId);
+      if (cached != null) {
+        int newlen = Math.max(cached.length, blockOffset + length);
+        byte[] b = new byte[newlen];
+        System.arraycopy(cached, 0, b, 0, cached.length);
+        System.arraycopy(buffer, offset, b, blockOffset, length);
+        cached = b;
+      } else {
+        cached = copy(blockOffset, buffer, offset, length);
+      }
+      map.put(name + blockId, cached);
+    }
+
+    private byte[] copy(int blockOffset, byte[] buffer, int offset, int length) {
+      byte[] b = new byte[length + blockOffset];
+      System.arraycopy(buffer, offset, b, blockOffset, length);
+      return b;
+    }
+
+    @Override
+    public boolean fetch(String name, long blockId, int blockOffset, byte[] b, int off, int lengthToReadInBlock) {
+      // return false;
+      byte[] data = map.get(name + blockId);
+      if (data == null) {
+        return false;
+      }
+      System.arraycopy(data, blockOffset, b, off, lengthToReadInBlock);
+      return true;
+    }
+
+    @Override
+    public void delete(String name) {
+
+    }
+
+    @Override
+    public long size() {
+      return map.size();
+    }
+
+    @Override
+    public void renameCacheFile(String source, String dest) {
+    }
+  }
+
+  private static final int MAX_NUMBER_OF_WRITES = 10000;
+  private static final int MIN_FILE_SIZE = 100;
+  private static final int MAX_FILE_SIZE = 100000;
+  private static final int MIN_BUFFER_SIZE = 1;
+  private static final int MAX_BUFFER_SIZE = 12000;
+  private static final int MAX_NUMBER_OF_READS = 20000;
+  private BlockDirectory directory;
+  private File file;
+  private Random random;
+  private MapperCache mapperCache;
+
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    file = new File(TEMP_DIR, HdfsDirectory.class.getName() + "-" + System.currentTimeMillis());
+    rm(file);
+    file.mkdirs();
+    FSDirectory dir = FSDirectory.open(new File(file, "base"));
+    mapperCache = new MapperCache();
+    directory = new BlockDirectory("test", dir, mapperCache, null, true, true);
+    random = random();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+    directory.close();
+    FileUtils.deleteDirectory(file);
+  }
+
+  @Test
+  public void testEOF() throws IOException {
+    Directory fsDir = FSDirectory.open(new File(file, "normal"));
+    String name = "test.eof";
+    createFile(name, fsDir, directory);
+    long fsLength = fsDir.fileLength(name);
+    long hdfsLength = directory.fileLength(name);
+    assertEquals(fsLength, hdfsLength);
+    testEof(name, fsDir, fsLength);
+    testEof(name, directory, hdfsLength);
+    fsDir.close();
+  }
+
+  private void testEof(String name, Directory directory, long length) throws IOException {
+    IndexInput input = directory.openInput(name, new IOContext());
+    try {
+      input.seek(length);
+      try {
+        input.readByte();
+        fail("should throw eof");
+      } catch (IOException e) {
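+        // expected: reading past EOF should throw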
+      }
+    } finally {
+      input.close();
+    }
+  }
+
+  @Test
+  public void testRandomAccessWrites() throws IOException {
+    long t1 = System.nanoTime();
+
+    int i = 0;
+    try {
+      for (; i < 10; i++) {
+        Directory fsDir = FSDirectory.open(new File(file, "normal"));
+        String name = getName();
+        createFile(name, fsDir, directory);
+        assertInputsEquals(name, fsDir, directory);
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail("Test failed on pass [" + i + "]");
+    }
+    long t2 = System.nanoTime();
+    System.out.println("Total time is " + ((t2 - t1)/1000000) + "ms");
+  }
+
+  @Test
+  public void testRandomAccessWritesLargeCache() throws IOException {
+    mapperCache.map = new ConcurrentLinkedHashMap.Builder<String, byte[]>().maximumWeightedCapacity(10000).build();
+    testRandomAccessWrites();
+  }
+
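+  // Reads random slices from both inputs at identical positions and verifies the bytes match.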
+  private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
+    int reads = random.nextInt(MAX_NUMBER_OF_READS);
+    IndexInput fsInput = fsDir.openInput(name, new IOContext());
+    IndexInput hdfsInput = hdfs.openInput(name, new IOContext());
+    assertEquals(fsInput.length(), hdfsInput.length());
+    int fileLength = (int) fsInput.length();
+    for (int i = 0; i < reads; i++) {
+      byte[] fsBuf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
+      byte[] hdfsBuf = new byte[fsBuf.length];
+      int offset = random.nextInt(fsBuf.length);
+      int length = random.nextInt(fsBuf.length - offset);
+      int pos = random.nextInt(fileLength - length);
+      fsInput.seek(pos);
+      fsInput.readBytes(fsBuf, offset, length);
+      hdfsInput.seek(pos);
+      hdfsInput.readBytes(hdfsBuf, offset, length);
+      for (int f = offset; f < offset + length; f++) {
+        if (fsBuf[f] != hdfsBuf[f]) {
+          fail("read [" + i + "]");
+        }
+      }
+    }
+    fsInput.close();
+    hdfsInput.close();
+  }
+
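+  // Writes identical random content to both directories so later reads can be compared byte-for-byte.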
+  private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
+    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
+    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
+    IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
+    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
+    for (int i = 0; i < writes; i++) {
+      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
+      random.nextBytes(buf);
+      int offset = random.nextInt(buf.length);
+      int length = random.nextInt(buf.length - offset);
+      fsOutput.writeBytes(buf, offset, length);
+      hdfsOutput.writeBytes(buf, offset, length);
+    }
+    fsOutput.close();
+    hdfsOutput.close();
+  }
+
+  private String getName() {
+    return Long.toString(Math.abs(random.nextLong()));
+  }
+
+  public static void rm(File file) {
+    if (!file.exists()) {
+      return;
+    }
+    if (file.isDirectory()) {
+      for (File f : file.listFiles()) {
+        rm(f);
+      }
+    }
+    file.delete();
+  }
+
+  /**
+   * Verify the configuration options for the block cache are handled
+   * appropriately.
+   */
+  @Test
+  public void ensureCacheConfigurable() throws Exception {
+    IOContext mergeContext = new IOContext(new MergeInfo(1, 1, false, 1));
+
+    BlockDirectory d = directory;
+    assertTrue(d.useReadCache("", IOContext.DEFAULT));
+    assertTrue(d.useWriteCache("", IOContext.DEFAULT));
+    assertFalse(d.useWriteCache("", mergeContext));
+
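+    // Inferred from the assertions below: the two trailing booleans enable the
+    // block cache for reads and writes, respectively.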
+    d = new BlockDirectory("test", directory, mapperCache, null, true, false);
+    assertTrue(d.useReadCache("", IOContext.DEFAULT));
+    assertFalse(d.useWriteCache("", IOContext.DEFAULT));
+    assertFalse(d.useWriteCache("", mergeContext));
+
+    d = new BlockDirectory("test", directory, mapperCache, null, false, true);
+    assertFalse(d.useReadCache("", IOContext.DEFAULT));
+    assertTrue(d.useWriteCache("", IOContext.DEFAULT));
+    assertFalse(d.useWriteCache("", mergeContext));
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
new file mode 100644
index 0000000..2349112
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
@@ -0,0 +1,222 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks threads (HADOOP-9049)
+//@Ignore("this test violates the test security policy because of org.apache.hadoop.fs.RawLocalFileSystem.mkdirs")
+public class HdfsDirectoryTest extends SolrTestCaseJ4 {
+  
+  private static final int MAX_NUMBER_OF_WRITES = 10000;
+  private static final int MIN_FILE_SIZE = 100;
+  private static final int MAX_FILE_SIZE = 100000;
+  private static final int MIN_BUFFER_SIZE = 1;
+  private static final int MAX_BUFFER_SIZE = 5000;
+  private static final int MAX_NUMBER_OF_READS = 10000;
+  private static MiniDFSCluster dfsCluster;
+  private HdfsDirectory directory;
+  private Random random;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createTempDir();
+    dfsCluster = HdfsTestUtil.setupClass(TEMP_DIR.getAbsolutePath()
+        + File.separator + HdfsDirectoryTest.class.getName() + "_hdfsdir-"
+        + System.currentTimeMillis());
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+  
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    
+    Configuration conf = new Configuration();
+    conf.set("dfs.permissions.enabled", "false");
+    
+    directory = new HdfsDirectory(new Path(dfsCluster.getURI().toString() + dataDir.getAbsolutePath() + "/hdfs"), conf);
+    
+    random = random();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  @Test
+  public void testWritingAndReadingAFile() throws IOException {
+    String[] listAll = directory.listAll();
+    for (String file : listAll) {
+      directory.deleteFile(file);
+    }
+    
+    IndexOutput output = directory.createOutput("testing.test", new IOContext());
+    output.writeInt(12345);
+    output.flush();
+    output.close();
+
+    IndexInput input = directory.openInput("testing.test", new IOContext());
+    assertEquals(12345, input.readInt());
+    input.close();
+
+    listAll = directory.listAll();
+    assertEquals(1, listAll.length);
+    assertEquals("testing.test", listAll[0]);
+
+    assertEquals(4, directory.fileLength("testing.test"));
+
+    IndexInput input1 = directory.openInput("testing.test", new IOContext());
+
+    IndexInput input2 = (IndexInput) input1.clone();
+    assertEquals(12345, input2.readInt());
+    input2.close();
+
+    assertEquals(12345, input1.readInt());
+    input1.close();
+
+    assertFalse(directory.fileExists("testing.test.other"));
+    assertTrue(directory.fileExists("testing.test"));
+    directory.deleteFile("testing.test");
+    assertFalse(directory.fileExists("testing.test"));
+  }
+  
+  @Test
+  public void testEOF() throws IOException {
+    Directory fsDir = new RAMDirectory();
+    String name = "test.eof";
+    createFile(name, fsDir, directory);
+    long fsLength = fsDir.fileLength(name);
+    long hdfsLength = directory.fileLength(name);
+    assertEquals(fsLength, hdfsLength);
+    testEof(name, fsDir, fsLength);
+    testEof(name, directory, hdfsLength);
+  }
+
+  private void testEof(String name, Directory directory, long length) throws IOException {
+    IndexInput input = directory.openInput(name, new IOContext());
+    try {
+      input.seek(length);
+      try {
+        input.readByte();
+        fail("should throw eof");
+      } catch (IOException e) {
+        // expected: reading past the end of the file must throw
+      }
+    } finally {
+      input.close();
+    }
+  }
+
+  @Test
+  public void testRandomAccessWrites() throws IOException {
+    int i = 0;
+    try {
+      Set<String> names = new HashSet<String>();
+      for (; i< 10; i++) {
+        Directory fsDir = new RAMDirectory();
+        String name = getName();
+        System.out.println("Working on pass [" + i  +"] contains [" + names.contains(name) + "]");
+        names.add(name);
+        createFile(name,fsDir,directory);
+        assertInputsEquals(name,fsDir,directory);
+        fsDir.close();
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail("Test failed on pass [" + i + "]");
+    }
+  }
+
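+  /** Performs a random number of positional reads against the local and HDFS copies and verifies they match. */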
+  private void assertInputsEquals(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
+    int reads = random.nextInt(MAX_NUMBER_OF_READS);
+    IndexInput fsInput = fsDir.openInput(name, new IOContext());
+    IndexInput hdfsInput = hdfs.openInput(name, new IOContext());
+    assertEquals(fsInput.length(), hdfsInput.length());
+    int fileLength = (int) fsInput.length();
+    for (int i = 0; i < reads; i++) {
+      int bufSize = Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength);
+      byte[] fsBuf = new byte[random.nextInt(bufSize > 0 ? bufSize : 1) + MIN_BUFFER_SIZE];
+      byte[] hdfsBuf = new byte[fsBuf.length];
+      int offset = random.nextInt(fsBuf.length);
+      
+      int maxLen = fsBuf.length - offset;
+      int length = random.nextInt(maxLen > 0 ? maxLen : 1);
+      int posRange = fileLength - length;
+      int pos = random.nextInt(posRange > 0 ? posRange : 1);
+      fsInput.seek(pos);
+      fsInput.readBytes(fsBuf, offset, length);
+      hdfsInput.seek(pos);
+      hdfsInput.readBytes(hdfsBuf, offset, length);
+      for (int f = offset; f < length; f++) {
+        if (fsBuf[f] != hdfsBuf[f]) {
+          fail();
+        }
+      }
+    }
+    fsInput.close();
+    hdfsInput.close();
+  }
+
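+  /** Pre-sizes both outputs to the same length, then writes identical random slices to each. */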
+  private void createFile(String name, Directory fsDir, HdfsDirectory hdfs) throws IOException {
+    int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
+    int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
+    IndexOutput fsOutput = fsDir.createOutput(name, new IOContext());
+    fsOutput.setLength(fileLength);
+    IndexOutput hdfsOutput = hdfs.createOutput(name, new IOContext());
+    hdfsOutput.setLength(fileLength);
+    for (int i = 0; i < writes; i++) {
+      byte[] buf = new byte[random.nextInt(Math.min(MAX_BUFFER_SIZE - MIN_BUFFER_SIZE, fileLength)) + MIN_BUFFER_SIZE];
+      random.nextBytes(buf);
+      int offset = random.nextInt(buf.length);
+      int length = random.nextInt(buf.length - offset);
+      fsOutput.writeBytes(buf, offset, length);
+      hdfsOutput.writeBytes(buf, offset, length);
+    }
+    fsOutput.close();
+    hdfsOutput.close();
+  }
+
+  private String getName() {
+    return Long.toString(Math.abs(random.nextLong()));
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
new file mode 100644
index 0000000..4f96374
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsLockFactoryTest.java
@@ -0,0 +1,86 @@
+package org.apache.solr.store.hdfs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.lucene.store.Lock;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.cloud.hdfs.HdfsTestUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+
+@ThreadLeakScope(Scope.NONE) // hdfs client currently leaks threads (HADOOP-9049)
+public class HdfsLockFactoryTest extends SolrTestCaseJ4 {
+  
+  private static MiniDFSCluster dfsCluster;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    createTempDir();
+    dfsCluster = HdfsTestUtil.setupClass(TEMP_DIR.getAbsolutePath()
+        + File.separator + HdfsLockFactoryTest.class.getName() + "_hdfsdir-"
+        + System.currentTimeMillis());
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    HdfsTestUtil.teardownClass(dfsCluster);
+    dfsCluster = null;
+  }
+  
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
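+  // Lock contract under test: a second obtain() while the lock is held must
+  // fail, and release() must make the lock obtainable again.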
+  @Test
+  public void testBasic() throws IOException {
+    URI uri = dfsCluster.getURI();
+    Path lockPath = new Path(uri.toString(), "/lock");
+    HdfsLockFactory lockFactory = new HdfsLockFactory(lockPath, new Configuration());
+    Lock lock = lockFactory.makeLock("testlock");
+    boolean success = lock.obtain();
+    assertTrue("We could not get the lock when it should be available", success);
+    success = lock.obtain();
+    assertFalse("We got the lock but it should be unavailble", success);
+    lock.release();
+    success = lock.obtain();
+    assertTrue("We could not get the lock when it should be available", success);
+    success = lock.obtain();
+    assertFalse("We got the lock but it should be unavailble", success);
+  }
+  
+
+}
\ No newline at end of file
diff --git a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java
index 7faa14a..066eb35 100644
--- a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java
+++ b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java
@@ -35,6 +35,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
index ddd7f78..c660912 100644
--- a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
@@ -45,6 +45,7 @@
   public static void beforeClass() throws Exception {
     savedFactory = System.getProperty("solr.DirectoryFactory");
     System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockFSDirectoryFactory");
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
   
diff --git a/solr/core/src/test/org/apache/solr/update/DocumentBuilderTest.java b/solr/core/src/test/org/apache/solr/update/DocumentBuilderTest.java
index eb9d737..c8ffe4a 100644
--- a/solr/core/src/test/org/apache/solr/update/DocumentBuilderTest.java
+++ b/solr/core/src/test/org/apache/solr/update/DocumentBuilderTest.java
@@ -357,9 +357,7 @@
    */
   private static byte expectedNorm(final DefaultSimilarity sim,
                                    final int length, final float boost) {
-    
-    return sim.encodeNormValue(boost / ((float) Math.sqrt(length)));
-
+    return (byte) sim.encodeNormValue(boost / ((float) Math.sqrt(length)));
   }
     
 
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
index f017ebe..7ea2669 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
@@ -45,6 +45,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
@@ -135,12 +136,21 @@
       CoreDescriptor dcore1 = new CoreDescriptor(h.getCoreContainer(), "split1", h.getCore().getCoreDescriptor().getInstanceDir());
       dcore1.setDataDir(indexDir1.getAbsolutePath());
       dcore1.setSchemaName("schema12.xml");
+      
+      if (h.getCoreContainer().getZkController() != null) {
+        h.getCoreContainer().preRegisterInZk(dcore1);
+      }
+      
       core1 = h.getCoreContainer().create(dcore1);
       h.getCoreContainer().register(core1, false);
 
       CoreDescriptor dcore2 = new CoreDescriptor(h.getCoreContainer(), "split2", h.getCore().getCoreDescriptor().getInstanceDir());
       dcore2.setDataDir(indexDir2.getAbsolutePath());
       dcore2.setSchemaName("schema12.xml");
+      
+      if (h.getCoreContainer().getZkController() != null) {
+        h.getCoreContainer().preRegisterInZk(dcore2);
+      }
       core2 = h.getCoreContainer().create(dcore2);
       h.getCoreContainer().register(core2, false);
 
diff --git a/solr/core/src/test/org/apache/solr/update/TestAtomicUpdateErrorCases.java b/solr/core/src/test/org/apache/solr/update/TestAtomicUpdateErrorCases.java
index 5dcf8d2..bacc85d 100644
--- a/solr/core/src/test/org/apache/solr/update/TestAtomicUpdateErrorCases.java
+++ b/solr/core/src/test/org/apache/solr/update/TestAtomicUpdateErrorCases.java
@@ -24,6 +24,7 @@
 
   public void testUpdateNoTLog() throws Exception {
     try {
+      System.setProperty("enable.update.log", "false");
       initCore("solrconfig.xml","schema15.xml");
       
       UpdateHandler uh = h.getCore().getUpdateHandler();
@@ -56,6 +57,7 @@
       }
 
     } finally {
+      System.clearProperty("enable.update.log");
       deleteCore();
     }
   }
diff --git a/solr/core/src/test/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactoryTest.java
new file mode 100644
index 0000000..007d89e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactoryTest.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.TestManagedSchema;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.File;
+import java.util.Date;
+
+/**
+ * Tests for the AddSchemaFieldsUpdateProcessorFactory: fields not yet in the
+ * schema are added to it on the fly, with field types chosen from the Java
+ * classes of the incoming field values.
+ */
+public class AddSchemaFieldsUpdateProcessorFactoryTest extends UpdateProcessorTestBase {
+  private static final String SOLRCONFIG_XML = "solrconfig-add-schema-fields-update-processor-chains.xml";
+  private static final String SCHEMA_XML     = "schema-add-schema-fields-update-processor.xml";
+
+  private static File tmpSolrHome;
+  private static File tmpConfDir;
+
+  private static final String collection = "collection1";
+  private static final String confDir = collection + "/conf";
+
+  @Before
+  public void initManagedSchemaCore() throws Exception {
+    createTempDir();
+    final String tmpSolrHomePath
+        = TEMP_DIR + File.separator + TestManagedSchema.class.getSimpleName() + System.currentTimeMillis();
+    tmpSolrHome = new File(tmpSolrHomePath).getAbsoluteFile();
+    tmpConfDir = new File(tmpSolrHome, confDir);
+    File testHomeConfDir = new File(TEST_HOME(), confDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, SOLRCONFIG_XML), tmpConfDir);
+    FileUtils.copyFileToDirectory(new File(testHomeConfDir, SCHEMA_XML), tmpConfDir);
+
+    // initCore will trigger an upgrade to managed schema, since the solrconfig*.xml has
+    // <schemaFactory class="ManagedIndexSchemaFactory" ... />
+    initCore(SOLRCONFIG_XML, SCHEMA_XML, tmpSolrHome.getPath());
+  }
+
+  @After
+  public void deleteCoreAndTempSolrHomeDirectory() throws Exception {
+    deleteCore();
+    FileUtils.deleteDirectory(tmpSolrHome);
+  }
+
+  public void testSingleField() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName = "newfield1";
+    assertNull(schema.getFieldOrNull(fieldName));
+    String dateString = "2010-11-12T13:14:15.168Z";
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    Date date = dateTimeFormatter.parseDateTime(dateString).toDate();
+    SolrInputDocument d = processAdd("add-fields-no-run-processor", doc(f("id", "1"), f(fieldName, date)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName));
+    assertEquals("tdate", schema.getFieldType(fieldName).getTypeName());
+  }
+
+  public void testSingleFieldRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName = "newfield2";
+    assertNull(schema.getFieldOrNull(fieldName));
+    Float floatValue = -13258.992f;
+    SolrInputDocument d = processAdd("add-fields", doc(f("id", "2"), f(fieldName, floatValue)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName));
+    assertEquals("tfloat", schema.getFieldType(fieldName).getTypeName());
+    assertU(commit());
+    assertQ(req("id:2"), "//arr[@name='" + fieldName + "']/float[.='" + floatValue.toString() + "']");
+  }
+
+  public void testSingleFieldMixedFieldTypesRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName = "newfield3";
+    assertNull(schema.getFieldOrNull(fieldName));
+    Float fieldValue1 = -13258.0f;
+    Double fieldValue2 = 8.4828800808E10; 
+    SolrInputDocument d = processAdd
+        ("add-fields", doc(f("id", "3"), f(fieldName, fieldValue1, fieldValue2)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName));
+    assertEquals("tdouble", schema.getFieldType(fieldName).getTypeName());
+    assertU(commit());
+    assertQ(req("id:3")
+        ,"//arr[@name='" + fieldName + "']/double[.='" + fieldValue1.toString() + "']"
+        ,"//arr[@name='" + fieldName + "']/double[.='" + fieldValue2.toString() + "']");
+  }
+
+  public void testSingleFieldDefaultFieldTypeRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName = "newfield4";
+    assertNull(schema.getFieldOrNull(fieldName));
+    Float fieldValue1 = -13258.0f;
+    Double fieldValue2 = 8.4828800808E10;
+    String fieldValue3 = "blah blah";
+    SolrInputDocument d = processAdd
+        ("add-fields", doc(f("id", "4"), f(fieldName, fieldValue1, fieldValue2, fieldValue3)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName));
+    assertEquals("text", schema.getFieldType(fieldName).getTypeName());
+    assertU(commit());
+    assertQ(req("id:4")
+        ,"//arr[@name='" + fieldName + "']/str[.='" + fieldValue1.toString() + "']"
+        ,"//arr[@name='" + fieldName + "']/str[.='" + fieldValue2.toString() + "']"
+        ,"//arr[@name='" + fieldName + "']/str[.='" + fieldValue3.toString() + "']"
+    );
+  }
+
+  public void testMultipleFieldsRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName1 = "newfield5";
+    final String fieldName2 = "newfield6";
+    assertNull(schema.getFieldOrNull(fieldName1));
+    assertNull(schema.getFieldOrNull(fieldName2));
+    Float field1Value1 = -13258.0f;
+    Double field1Value2 = 8.4828800808E10;
+    Long field1Value3 = 999L;
+    Integer field2Value1 = 55123;
+    Long field2Value2 = 1234567890123456789L;
+    SolrInputDocument d = processAdd
+        ("add-fields", doc(f("id", "5"), f(fieldName1, field1Value1, field1Value2, field1Value3),
+                                         f(fieldName2, field2Value1, field2Value2)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName1));
+    assertNotNull(schema.getFieldOrNull(fieldName2));
+    assertEquals("tdouble", schema.getFieldType(fieldName1).getTypeName());
+    assertEquals("tlong", schema.getFieldType(fieldName2).getTypeName());
+    assertU(commit());
+    assertQ(req("id:5")
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value1.toString() + "']"
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value2.toString() + "']"
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value3.doubleValue() + "']"
+        ,"//arr[@name='" + fieldName2 + "']/long[.='" + field2Value1.toString() + "']"
+        ,"//arr[@name='" + fieldName2 + "']/long[.='" + field2Value2.toString() + "']");
+  }
+
+  public void testParseAndAddMultipleFieldsRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName1 = "newfield7";
+    final String fieldName2 = "newfield8";
+    final String fieldName3 = "newfield9";
+    final String fieldName4 = "newfield10";
+    assertNull(schema.getFieldOrNull(fieldName1));
+    assertNull(schema.getFieldOrNull(fieldName2));
+    assertNull(schema.getFieldOrNull(fieldName3));
+    assertNull(schema.getFieldOrNull(fieldName4));
+    String field1String1 = "-13,258.0"; 
+    Float field1Value1 = -13258.0f;
+    String field1String2 = "84,828,800,808.0"; 
+    Double field1Value2 = 8.4828800808E10;
+    String field1String3 = "999";
+    Long field1Value3 = 999L;
+    String field2String1 = "55,123";
+    Integer field2Value1 = 55123;
+    String field2String2 = "1,234,567,890,123,456,789";
+    Long field2Value2 = 1234567890123456789L;
+    String field3String1 = "blah-blah";
+    String field3Value1 = field3String1;
+    String field3String2 = "-5.28E-3";
+    Double field3Value2 = -5.28E-3;
+    String field4String1 = "1999-04-17 17:42";
+    DateTimeFormatter dateTimeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm").withZoneUTC();
+    DateTime dateTime =  dateTimeFormatter.parseDateTime(field4String1);
+    Date field4Value1 = dateTime.toDate();
+    DateTimeFormatter dateTimeFormatter2 = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZoneUTC();
+    String field4Value1String = dateTimeFormatter2.print(dateTime) + "Z";
+    
+    SolrInputDocument d = processAdd
+        ("parse-and-add-fields", doc(f("id", "6"), f(fieldName1, field1String1, field1String2, field1String3),
+                                                   f(fieldName2, field2String1, field2String2),
+                                                   f(fieldName3, field3String1, field3String2),
+                                                   f(fieldName4, field4String1)));
+    assertNotNull(d);
+    schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull(fieldName1));
+    assertNotNull(schema.getFieldOrNull(fieldName2));
+    assertNotNull(schema.getFieldOrNull(fieldName3));
+    assertNotNull(schema.getFieldOrNull(fieldName4));
+    assertEquals("tdouble", schema.getFieldType(fieldName1).getTypeName());
+    assertEquals("tlong", schema.getFieldType(fieldName2).getTypeName());
+    assertEquals("text", schema.getFieldType(fieldName3).getTypeName());
+    assertEquals("tdate", schema.getFieldType(fieldName4).getTypeName());
+    assertU(commit());
+    assertQ(req("id:6")
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value1.toString() + "']"
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value2.toString() + "']"
+        ,"//arr[@name='" + fieldName1 + "']/double[.='" + field1Value3.doubleValue() + "']"
+        ,"//arr[@name='" + fieldName2 + "']/long[.='" + field2Value1.toString() + "']"
+        ,"//arr[@name='" + fieldName2 + "']/long[.='" + field2Value2.toString() + "']"
+        ,"//arr[@name='" + fieldName3 + "']/str[.='" + field3String1 + "']"
+        ,"//arr[@name='" + fieldName3 + "']/str[.='" + field3String2 + "']"
+        ,"//arr[@name='" + fieldName4 + "']/date[.='" + field4Value1String + "']");
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
index 39c4b3d..2965292 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
@@ -552,6 +552,8 @@
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull("test expects 'foo_giberish' to not be a valid field, looks like schema was changed out from under us",
                schema.getFieldTypeNoEx("foo_giberish"));
+    assertNull("test expects 'bar_giberish' to not be a valid field, looks like schema was changed out from under us",
+               schema.getFieldTypeNoEx("bar_giberish"));
     assertNotNull("test expects 't_raw' to be a valid field, looks like schema was changed out from under us",
                   schema.getFieldTypeNoEx("t_raw"));
     assertNotNull("test expects 'foo_s' to be a valid field, looks like schema was changed out from under us",
@@ -561,11 +563,13 @@
     
     d = processAdd("ignore-not-in-schema",       
                    doc(f("id", "1111"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
                        f("foo_giberish", "123456789", "", 42, "abcd"),
                        f("t_raw", "123456789", "", 42, "abcd"),
                        f("foo_s", "hoss")));
     
     assertNotNull(d);
+    assertFalse(d.containsKey("bar_giberish"));
     assertFalse(d.containsKey("foo_giberish"));
     assertEquals(Arrays.asList("123456789", "", 42, "abcd"), 
                  d.getFieldValues("t_raw"));
@@ -574,15 +578,98 @@
     d = processAdd("ignore-some",
                    doc(f("id", "1111"),
                        f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
                        f("t_raw", "123456789", "", 42, "abcd"),
                        f("foo_s", "hoss")));
 
     assertNotNull(d);
     assertEquals(Arrays.asList("123456789", "", 42, "abcd"), 
                  d.getFieldValues("foo_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"), 
+                 d.getFieldValues("bar_giberish"));
     assertFalse(d.containsKey("t_raw"));
     assertEquals("hoss", d.getFieldValue("foo_s"));
-    
+
+    d = processAdd("ignore-not-in-schema-explicit-selector",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertFalse(d.containsKey("foo_giberish"));
+    assertFalse(d.containsKey("bar_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("t_raw"));
+    assertEquals("hoss", d.getFieldValue("foo_s"));
+
+    d = processAdd("ignore-not-in-schema-and-foo-name-prefix",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertFalse(d.containsKey("foo_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("bar_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("t_raw"));
+    assertEquals("hoss", d.getFieldValue("foo_s"));
+
+    d = processAdd("ignore-foo-name-prefix-except-not-schema",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("foo_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("bar_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("t_raw"));
+    assertFalse(d.containsKey("foo_s"));
+
+    d = processAdd("ignore-in-schema",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertTrue(d.containsKey("foo_giberish"));
+    assertTrue(d.containsKey("bar_giberish"));
+    assertFalse(d.containsKey("id"));
+    assertFalse(d.containsKey("t_raw"));
+    assertFalse(d.containsKey("foo_s"));
+
+    d = processAdd("ignore-not-in-schema-explicit-str-selector",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertFalse(d.containsKey("foo_giberish"));
+    assertFalse(d.containsKey("bar_giberish"));
+    assertEquals(Arrays.asList("123456789", "", 42, "abcd"),
+                 d.getFieldValues("t_raw"));
+    assertEquals("hoss", d.getFieldValue("foo_s"));
+
+    d = processAdd("ignore-in-schema-str-selector",
+                   doc(f("id", "1111"),
+                       f("foo_giberish", "123456789", "", 42, "abcd"),
+                       f("bar_giberish", "123456789", "", 42, "abcd"),
+                       f("t_raw", "123456789", "", 42, "abcd"),
+                       f("foo_s", "hoss")));
+    assertNotNull(d);
+    assertTrue(d.containsKey("foo_giberish"));
+    assertTrue(d.containsKey("bar_giberish"));
+    assertFalse(d.containsKey("id"));
+    assertFalse(d.containsKey("t_raw"));
+    assertFalse(d.containsKey("foo_s"));
 
   }
 
diff --git a/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java b/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java
new file mode 100644
index 0000000..4be6c21
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java
@@ -0,0 +1,910 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.schema.IndexSchema;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.BeforeClass;
+
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Tests for the field mutating update processors
+ * that parse Dates, Longs, Doubles, and Booleans.
+ */
+public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
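+  // Tolerance for float/double assertions; parsing should reproduce the value
+  // exactly, so an effectively-zero delta is intentional.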
+  private static final double EPSILON = 1E-15;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig-parsing-update-processor-chains.xml", "schema12.xml");
+  }
+
+  public void testParseDateRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("date_dt")); // should match "*_dt" dynamic field
+    String dateString = "2010-11-12T13:14:15.168Z";
+    SolrInputDocument d = processAdd("parse-date", doc(f("id", "9"), f("date_dt", dateString)));
+    assertNotNull(d);
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+    assertTrue(d.getFieldValue("date_dt") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date) d.getFieldValue("date_dt")).getTime());
+    assertU(commit());
+    assertQ(req("id:9"), "//date[@name='date_dt'][.='" + dateString + "']");
+  }
+
+  public void testParseTrieDateRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("date_tdt")); // should match "*_tdt" dynamic field
+    String dateString = "2010-11-12T13:14:15.168Z";
+    SolrInputDocument d = processAdd("parse-date", doc(f("id", "39"), f("date_tdt", dateString)));
+    assertNotNull(d);
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+    assertTrue(d.getFieldValue("date_tdt") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date) d.getFieldValue("date_tdt")).getTime());
+    assertU(commit());
+    assertQ(req("id:39"), "//date[@name='date_tdt'][.='" + dateString + "']");
+  }
+
+
+  public void testParseDateFieldNotInSchema() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    String dateString = "2010-11-12T13:14:15.168Z";
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+
+    SolrInputDocument d = processAdd("parse-date-no-run-processor",
+                                     doc(f("id", "18"), f("not_in_schema", dateString)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date)d.getFieldValue("not_in_schema")).getTime());
+    
+    d = processAdd("parse-date-no-run-processor", 
+                   doc(f("id", "36"), f("not_in_schema", "not a date", dateString)));
+    assertNotNull(d);
+    for (Object val : d.getFieldValues("not_in_schema")) {
+      // check that nothing was mutated, since not all field values are parseable as dates 
+      assertTrue(val instanceof String);
+    }
+
+    d = processAdd("parse-date-no-run-processor",
+        doc(f("id", "72"), f("not_in_schema", dateString, "not a date")));
+    assertNotNull(d);
+    for (Object val : d.getFieldValues("not_in_schema")) {
+      // check again that nothing was mutated, but with a valid date first this time 
+      assertTrue(val instanceof String);
+    }
+  }
+  
+  public void testParseDateNonUTCdefaultTimeZoneRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("date_dt")); // should match "*_dt" dynamic field
+    String dateStringNoTimeZone         = "2010-11-12T13:14:15.168";
+    String dateStringUTC = dateStringNoTimeZone + "Z";
+
+    // dateStringNoTimeZone interpreted as being in timeZone America/New_York, then printed as UTC
+    String dateStringUSEasternTimeAsUTC = "2010-11-12T18:14:15.168Z";
+    
+    SolrInputDocument d = processAdd
+        ("parse-date-non-UTC-defaultTimeZone", doc(f("id", "99"), f("dateUTC_dt", dateStringUTC), 
+                                                   f("dateNoTimeZone_dt", dateStringNoTimeZone)));
+    assertNotNull(d);
+    String pattern = "yyyy-MM-dd'T'HH:mm:ss.SSSZ";
+    DateTimeFormatter dateTimeFormatterUTC = DateTimeFormat.forPattern(pattern);
+    DateTime dateTimeUTC = dateTimeFormatterUTC.parseDateTime(dateStringUTC);
+    assertTrue(d.getFieldValue("dateUTC_dt") instanceof Date);
+    assertTrue(d.getFieldValue("dateNoTimeZone_dt") instanceof Date);
+    assertEquals(dateTimeUTC.getMillis(), ((Date) d.getFieldValue("dateUTC_dt")).getTime());
+    assertU(commit());
+    assertQ(req("id:99") 
+        ,"//date[@name='dateUTC_dt'][.='" + dateStringUTC + "']"
+        ,"//date[@name='dateNoTimeZone_dt'][.='" + dateStringUSEasternTimeAsUTC + "']");
+  }
+  
+  public void testParseDateExplicitNotInSchemaSelector() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    String dateString = "2010-11-12T13:14:15.168Z";
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+
+    SolrInputDocument d = processAdd("parse-date-explicit-not-in-schema-selector-no-run-processor",
+                                     doc(f("id", "88"), f("not_in_schema", dateString)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date)d.getFieldValue("not_in_schema")).getTime());
+  }
+
+  public void testParseDateExplicitTypeClassSelector() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("date_dt"));
+    String dateString = "2010-11-12T13:14:15.168Z";
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+
+    SolrInputDocument d = processAdd("parse-date-explicit-typeclass-selector-no-run-processor",
+                                     doc(f("id", "77"), f("date_dt", dateString)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("date_dt") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date)d.getFieldValue("date_dt")).getTime());
+  }
+
+  public void testParseUSPacificDate() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    String dateString = "8/9/2010";  // Interpreted as 00:00 US Pacific Daylight Time = UTC+07:00
+    String dateStringUTC = "2010-08-09T07:00:00.000Z";
+    SolrInputDocument d = processAdd("US-Pacific-parse-date-no-run-processor",
+                                     doc(f("id", "288"), f("not_in_schema", dateString)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Date);
+    assertEquals(dateStringUTC, 
+                 (new DateTime(((Date)d.getFieldValue("not_in_schema")).getTime(),DateTimeZone.UTC)).toString());
+  }
+  
+  public void testParseDateFormats() throws Exception {
+    String[] formatExamples = { 
+        "yyyy-MM-dd'T'HH:mm:ss.SSSZ",  "2010-01-15T00:00:00.000Z",
+        "yyyy-MM-dd'T'HH:mm:ss,SSSZ",  "2010-01-15T00:00:00,000Z",
+        "yyyy-MM-dd'T'HH:mm:ss.SSS",   "2010-01-15T00:00:00.000",
+        "yyyy-MM-dd'T'HH:mm:ss,SSS",   "2010-01-15T00:00:00,000",
+        "yyyy-MM-dd'T'HH:mm:ssZ",      "2010-01-15T00:00:00Z",
+        "yyyy-MM-dd'T'HH:mm:ss",       "2010-01-15T00:00:00",
+        "yyyy-MM-dd'T'HH:mmZ",         "2010-01-15T00:00Z",
+        "yyyy-MM-dd'T'HH:mm",          "2010-01-15T00:00",
+        "yyyy-MM-dd HH:mm:ss.SSSZ",    "2010-01-15 00:00:00.000Z",
+        "yyyy-MM-dd HH:mm:ss,SSSZ",    "2010-01-15 00:00:00,000Z",
+        "yyyy-MM-dd HH:mm:ss.SSS",     "2010-01-15 00:00:00.000",
+        "yyyy-MM-dd HH:mm:ss,SSS",     "2010-01-15 00:00:00,000",
+        "yyyy-MM-dd HH:mm:ssZ",        "2010-01-15 00:00:00Z",
+        "yyyy-MM-dd HH:mm:ss",         "2010-01-15 00:00:00",
+        "yyyy-MM-dd HH:mmZ",           "2010-01-15 00:00Z",
+        "yyyy-MM-dd HH:mm",            "2010-01-15 00:00",
+        "yyyy-MM-dd hh:mm a",          "2010-01-15 12:00 AM",
+        "yyyy-MM-dd hh:mma",           "2010-01-15 12:00AM",
+        "yyyy-MM-dd",                  "2010-01-15",
+        "EEE MMM dd HH:mm:ss Z yyyy",  "Fri Jan 15 00:00:00 +0000 2010",
+        "EEE MMM dd HH:mm:ss yyyy Z",  "Fri Jan 15 00:00:00 2010 +00:00",
+        "EEE MMM dd HH:mm:ss yyyy",    "Fri Jan 15 00:00:00 2010",
+        "EEE, dd MMM yyyy HH:mm:ss Z", "Fri, 15 Jan 2010 00:00:00 +00:00",
+        "EEEE, dd-MMM-yy HH:mm:ss Z",  "Friday, 15-Jan-10 00:00:00 +00:00",
+        "EEEE, MMMM dd, yyyy",         "Friday, January 15, 2010",
+        "MMMM dd, yyyy",               "January 15, 2010",
+        "MMM. dd, yyyy",               "Jan. 15, 2010"
+    };
+
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("dateUTC_dt")); // should match "*_dt" dynamic field
+
+    String dateTimePattern = "yyyy-MM-dd'T'HH:mm:ss.SSSZ";
+    DateTimeFormatter dateTimeFormatterUTC = DateTimeFormat.forPattern(dateTimePattern);
+    DateTime dateTimeUTC = dateTimeFormatterUTC.parseDateTime(formatExamples[1]);
+
+    for (int i = 0 ; i < formatExamples.length ; i += 2) {
+      String format = formatExamples[i];
+      String dateString = formatExamples[i + 1];
+      String id = "95" + i;
+      SolrInputDocument d = processAdd("parse-date-UTC-defaultTimeZone-no-run-processor", 
+                                       doc(f("id", id), f("dateUTC_dt", dateString)));
+      assertNotNull(d);
+      assertTrue("date '" + dateString + "' with format '" + format + "' is not mutated to a Date",
+          d.getFieldValue("dateUTC_dt") instanceof Date);
+      assertEquals("date '" + dateString + "' with format '" + format + "' mismatched milliseconds",
+                   dateTimeUTC.getMillis(), ((Date)d.getFieldValue("dateUTC_dt")).getTime());
+    }
+  }
+  
+  public void testParseFrenchDate() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    String frenchDateString = "le vendredi 15 janvier 2010";
+    String dateString = "2010-01-15T00:00:00.000Z";
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime();
+    DateTime dateTime = dateTimeFormatter.parseDateTime(dateString);
+    SolrInputDocument d = processAdd("parse-french-date-UTC-defaultTimeZone-no-run-processor",
+                                     doc(f("id", "88"), f("not_in_schema", frenchDateString)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Date);
+    assertEquals(dateTime.getMillis(), ((Date)d.getFieldValue("not_in_schema")).getTime());
+  }
+  
+  public void testFailedParseMixedDate() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    String[] dateStrings = { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
+    for (String dateString : dateStrings) {
+      mixed.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
+    }
+    Double extraDouble = 29.554d;
+    mixed.put(extraDouble, extraDouble); // Double-typed field value
+    SolrInputDocument d = processAdd("parse-date-no-run-processor", 
+                                     doc(f("id", "7201"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundDouble = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (extraDouble == o) { // reference equality: the same boxed Double instance was stored
+        foundDouble = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundDouble);
+    assertTrue(mixed.isEmpty());
+  }
+
+  public void testParseIntRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("int1_i")); // should match dynamic field "*_i"
+    assertNotNull(schema.getFieldOrNull("int2_i")); // should match dynamic field "*_i"
+    int value = 1089883491;
+    String intString1 = "1089883491";
+    String intString2 = "1,089,883,491";
+    SolrInputDocument d = processAdd("parse-int",
+        doc(f("id", "113"), f("int1_i", intString1), f("int2_i", intString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("int1_i") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("int1_i")).intValue());
+    assertTrue(d.getFieldValue("int2_i") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("int2_i")).intValue());
+
+    assertU(commit());
+    assertQ(req("id:113")
+        ,"//int[@name='int1_i'][.='" + value + "']"
+        ,"//int[@name='int2_i'][.='" + value + "']");
+  }
+
+  public void testParseIntNonRootLocale() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("int_i")); // should match dynamic field "*_i"
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    int value = 1089883491;
+    String intString1 = "1089883491";
+    String intString2 = "1 089 883 491"; // no-break space U+00A0
+    SolrInputDocument d = processAdd("parse-int-russian-no-run-processor",
+        doc(f("id", "113"), f("int_i", intString1), f("not_in_schema", intString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("int_i") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("int_i")).intValue());
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("not_in_schema")).intValue());
+  }
+
+  public void testParseTrieIntRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("int1_ti")); // should match dynamic field "*_ti"
+    assertNotNull(schema.getFieldOrNull("int2_ti")); // should match dynamic field "*_ti"
+    int value = 1089883491;
+    String intString1 = "1089883491";
+    String intString2 = "1,089,883,491";
+    SolrInputDocument d = processAdd("parse-int",
+        doc(f("id", "113"), f("int1_ti", intString1), f("int2_ti", intString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("int1_ti") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("int1_ti")).intValue());
+    assertTrue(d.getFieldValue("int2_ti") instanceof Integer);
+    assertEquals(value, ((Integer)d.getFieldValue("int2_ti")).intValue());
+
+    assertU(commit());
+    assertQ(req("id:113")
+        ,"//int[@name='int1_ti'][.='" + value + "']"
+        ,"//int[@name='int2_ti'][.='" + value + "']");
+  }
+
+  public void testIntOverflow() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema1"));
+    assertNull(schema.getFieldOrNull("not_in_schema2"));
+    long longValue1 = (long)Integer.MAX_VALUE + 100L;
+    long longValue2 = (long)Integer.MIN_VALUE - 100L;
+    String longString1 = Long.toString(longValue1);
+    String longString2 = Long.toString(longValue2);
+    SolrInputDocument d = processAdd("parse-int-no-run-processor",
+        doc(f("id", "282"), f("not_in_schema1", longString1), f("not_in_schema2", longString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("not_in_schema1") instanceof String);
+    assertTrue(d.getFieldValue("not_in_schema2") instanceof String);
+  }
+  
+  public void testFailedParseMixedInt() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    Float floatVal = 294423.0f;
+    mixed.put(85, "85");
+    mixed.put(floatVal, floatVal); // Float-typed field value
+    mixed.put(-2894518, "-2,894,518");
+    mixed.put(1879472193, "1,879,472,193");
+    SolrInputDocument d = processAdd("parse-int-no-run-processor",
+                                     doc(f("id", "7202"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundFloat = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (floatVal == o) { // reference equality: the same boxed Float instance was stored
+        foundFloat = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundFloat);
+    assertTrue(mixed.isEmpty());
+  }
+
+  public void testParseLongRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("long1_l")); // should match dynamic field "*_l"
+    assertNotNull(schema.getFieldOrNull("long2_l")); // should match dynamic field "*_l"
+    long value = 1089883491L;
+    String longString1 = "1089883491";
+    String longString2 = "1,089,883,491";
+    SolrInputDocument d = processAdd("parse-long", 
+                                     doc(f("id", "113"), f("long1_l", longString1), f("long2_l", longString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("long1_l") instanceof Long);
+    assertEquals(value, ((Long) d.getFieldValue("long1_l")).longValue());
+    assertTrue(d.getFieldValue("long2_l") instanceof Long);
+    assertEquals(value, ((Long)d.getFieldValue("long2_l")).longValue());
+    
+    assertU(commit());
+    assertQ(req("id:113")
+        ,"//long[@name='long1_l'][.='" + value + "']"
+        ,"//long[@name='long2_l'][.='" + value + "']");
+  }
+
+  public void testParseLongNonRootLocale() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("long_l")); // should match dynamic field "*_l"
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    long value = 1089883491L;
+    String longString1 = "1089883491";
+    String longString2 = "1 089 883 491"; // no-break space U+00A0
+    SolrInputDocument d = processAdd("parse-long-russian-no-run-processor",
+                                     doc(f("id", "113"), f("long_l", longString1), f("not_in_schema", longString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("long_l") instanceof Long);
+    assertEquals(value, ((Long)d.getFieldValue("long_l")).longValue());
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Long);
+    assertEquals(value, ((Long)d.getFieldValue("not_in_schema")).longValue());
+  }
+
+  public void testParseTrieLongRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("long1_tl")); // should match dynamic field "*_tl"
+    assertNotNull(schema.getFieldOrNull("long2_tl")); // should match dynamic field "*_tl"
+    long value = 1089883491L;
+    String longString1 = "1089883491";
+    String longString2 = "1,089,883,491";
+    SolrInputDocument d = processAdd("parse-long",
+        doc(f("id", "113"), f("long1_tl", longString1), f("long2_tl", longString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("long1_tl") instanceof Long);
+    assertEquals(value, ((Long)d.getFieldValue("long1_tl")).longValue());
+    assertTrue(d.getFieldValue("long2_tl") instanceof Long);
+    assertEquals(value, ((Long)d.getFieldValue("long2_tl")).longValue());
+
+    assertU(commit());
+    assertQ(req("id:113")
+        ,"//long[@name='long1_tl'][.='" + value + "']"
+        ,"//long[@name='long2_tl'][.='" + value + "']");
+  }
+
+  public void testFailedParseMixedLong() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    Float floatVal = 294423.0f;
+    mixed.put(85L, "85");
+    mixed.put(floatVal, floatVal); // Float-typed field value
+    mixed.put(-2894518L, "-2,894,518");
+    mixed.put(1879472193L, "1,879,472,193");
+    SolrInputDocument d = processAdd("parse-long-no-run-processor",
+                                     doc(f("id", "7204"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundFloat = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (floatVal == o) {
+        foundFloat = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundFloat);
+    assertTrue(mixed.isEmpty());
+  }
+
+  public void testParseFloatRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("float1_f")); // should match dynamic field "*_f"
+    assertNotNull(schema.getFieldOrNull("float2_f")); // should match dynamic field "*_f"
+    float value = 10898.83491f;
+    String floatString1 = "10898.83491";
+    String floatString2 = "10,898.83491";
+    SolrInputDocument d = processAdd("parse-float",
+        doc(f("id", "128"), f("float1_f", floatString1), f("float2_f", floatString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("float1_f") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("float1_f"), EPSILON);
+    assertTrue(d.getFieldValue("float2_f") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("float2_f"), EPSILON);
+
+    assertU(commit());
+    assertQ(req("id:128")
+        ,"//float[@name='float1_f'][.='" + value + "']"
+        ,"//float[@name='float2_f'][.='" + value + "']");
+  }
+
+  public void testParseFloatNonRootLocale() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("float_f")); // should match dynamic field "*_f"
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    float value = 10898.83491f;
+    String floatString1 = "10898,83491";
+    String floatString2 = "10 898,83491"; // no-break space: U+00A0
+    SolrInputDocument d = processAdd("parse-float-french-no-run-processor",
+        doc(f("id", "140"), f("float_f", floatString1),
+            f("not_in_schema", floatString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("float_f") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("float_f"), EPSILON);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("not_in_schema"), EPSILON);
+  }
+
+  public void testParseTrieFloatRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("float1_tf")); // should match dynamic field "*_tf"
+    assertNotNull(schema.getFieldOrNull("float2_tf")); // should match dynamic field "*_tf"
+    float value = 10898.83491f;
+    String floatString1 = "10898.83491";
+    String floatString2 = "10,898.83491";
+    SolrInputDocument d = processAdd("parse-float",
+        doc(f("id", "728"), f("float1_tf", floatString1), f("float2_tf", floatString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("float1_tf") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("float1_tf"), EPSILON);
+    assertTrue(d.getFieldValue("float2_tf") instanceof Float);
+    assertEquals(value, (Float)d.getFieldValue("float2_tf"), EPSILON);
+
+    assertU(commit());
+    assertQ(req("id:728")
+        ,"//float[@name='float1_tf'][.='" + value + "']"
+        ,"//float[@name='float2_tf'][.='" + value + "']");
+  }
+  
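+  /*
+   * Unlike the "failed parse" cases, every value here either already is a
+   * Float or parses cleanly as one, so the whole multivalued field is
+   * expected to come out as Floats.
+   */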
+  public void testMixedFloats() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("float_tf")); // should match dynamic field "*_tf"
+    Map<Float,Object> mixedFloats = new HashMap<Float,Object>();
+    mixedFloats.put(85.0f, "85");
+    mixedFloats.put(2894518.0f, "2,894,518");
+    mixedFloats.put(2.94423E-9f, 2.94423E-9f); // Float-typed field value
+    mixedFloats.put(48794721.937f, "48,794,721.937");
+    SolrInputDocument d = processAdd("parse-float-no-run-processor", 
+                                     doc(f("id", "342"), f("float_tf", mixedFloats.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues("float_tf")) {
+      assertTrue(o instanceof Float);
+      mixedFloats.remove(o);
+    }
+    assertTrue(mixedFloats.isEmpty());
+  }
+
+  public void testFailedParseMixedFloat() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    Long longVal = 294423L;
+    mixed.put(85L, "85");
+    mixed.put(longVal, longVal); // Long-typed field value
+    mixed.put(-2894518L, "-2,894,518");
+    mixed.put(1879472193L, "1,879,472,193");
+    SolrInputDocument d = processAdd("parse-float-no-run-processor",
+                                     doc(f("id", "7205"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundLong = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (longVal == o) {
+        foundLong = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundLong);
+    assertTrue(mixed.isEmpty());
+  }
+
+  public void testParseDoubleRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("double1_d")); // should match dynamic field "*_d"
+    assertNotNull(schema.getFieldOrNull("double2_d")); // should match dynamic field "*_d"
+    double value = 10898.83491;
+    String doubleString1 = "10898.83491";
+    String doubleString2 = "10,898.83491";
+    SolrInputDocument d = processAdd("parse-double",
+        doc(f("id", "128"), f("double1_d", doubleString1), f("double2_d", doubleString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("double1_d") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("double1_d"), EPSILON);
+    assertTrue(d.getFieldValue("double2_d") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("double2_d"), EPSILON);
+
+    assertU(commit());
+    assertQ(req("id:128")
+        ,"//double[@name='double1_d'][.='" + value + "']"
+        ,"//double[@name='double2_d'][.='" + value + "']");
+  }
+
+  public void testParseDoubleNonRootLocale() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("double_d")); // should match dynamic field "*_d"
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    double value = 10898.83491;
+    String doubleString1 = "10898,83491";
+    String doubleString2 = "10 898,83491"; // no-break space: U+00A0
+    SolrInputDocument d = processAdd("parse-double-french-no-run-processor",
+                                     doc(f("id", "140"), f("double_d", doubleString1), 
+                                         f("not_in_schema", doubleString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("double_d") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("double_d"), EPSILON);
+    assertTrue(d.getFieldValue("not_in_schema") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("not_in_schema"), EPSILON);
+  }
+
+  public void testParseTrieDoubleRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("double1_td")); // should match dynamic field "*_td"
+    assertNotNull(schema.getFieldOrNull("double2_td")); // should match dynamic field "*_td"
+    double value = 10898.83491;
+    String doubleString1 = "10898.83491";
+    String doubleString2 = "10,898.83491";
+    SolrInputDocument d = processAdd("parse-double",
+        doc(f("id", "728"), f("double1_td", doubleString1), f("double2_td", doubleString2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("double1_td") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("double1_td"), EPSILON);
+    assertTrue(d.getFieldValue("double2_td") instanceof Double);
+    assertEquals(value, (Double)d.getFieldValue("double2_td"), EPSILON);
+
+    assertU(commit());
+    assertQ(req("id:728")
+        ,"//double[@name='double1_td'][.='" + value + "']"
+        ,"//double[@name='double2_td'][.='" + value + "']");
+  }
+
+  public void testFailedParseMixedDouble() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    Long longVal = 294423L;
+    mixed.put(85, "85.0");
+    mixed.put(longVal, longVal); // Long-typed field value
+    mixed.put(-2894.518, "-2,894.518");
+    mixed.put(187947.2193, "187,947.2193");
+    SolrInputDocument d = processAdd("parse-double-no-run-processor",
+                                     doc(f("id", "7206"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundLong = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (longVal == o) {
+        foundLong = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundLong);
+    assertTrue(mixed.isEmpty());
+  }
+
+  public void testParseBooleanRoundTrip() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("boolean1_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean2_b")); // should match dynamic field "*_b"
+    boolean value1 = true;
+    boolean value2 = false;
+    SolrInputDocument d = processAdd("parse-boolean",
+        doc(f("id", "141"), f("boolean1_b", value1), f("boolean2_b", value2)));
+    assertNotNull(d);
+    assertTrue(d.getFieldValue("boolean1_b") instanceof Boolean);
+    assertEquals(value1, d.getFieldValue("boolean1_b"));
+    assertTrue(d.getFieldValue("boolean2_b") instanceof Boolean);
+    assertEquals(value2, d.getFieldValue("boolean2_b"));
+
+    assertU(commit());
+    assertQ(req("id:141")
+        ,"//bool[@name='boolean1_b'][.='" + value1 + "']"
+        ,"//bool[@name='boolean2_b'][.='" + value2 + "']");
+  }
+  
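+  /*
+   * The alternate-values chains below are assumed to declare custom boolean
+   * tokens, roughly like this (a sketch; the parameter names are assumptions
+   * based on ParseBooleanFieldUpdateProcessorFactory, not the literal file):
+   *
+   *   <processor class="solr.ParseBooleanFieldUpdateProcessorFactory">
+   *     <str name="caseSensitive">false</str>
+   *     <str name="trueValue">on</str>
+   *     <str name="trueValue">yes</str>
+   *     <str name="falseValue">off</str>
+   *     <str name="falseValue">no</str>
+   *   </processor>
+   */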
+  public void testParseAlternateValueBooleans() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("boolean1_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean2_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean3_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean4_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean5_b")); // should match dynamic field "*_b"
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    boolean[] values      = { true, true, true, false, false, false };
+    String[] stringValues = { "on", "yes", "True", "Off", "no", "FALSE" };
+    String[] fieldNames   = { "boolean1_b", "boolean2_b", "boolean3_b", "boolean4_b", "boolean5_b", "not_in_schema" };
+    SolrInputDocument d = doc(f("id", "55"));
+    for (int i = 0 ; i < values.length ; ++i) {
+      d.addField(fieldNames[i], stringValues[i]);
+    }
+    d = processAdd("parse-boolean-alternate-values-no-run-processor", d);
+    assertNotNull(d);
+
+    for (int i = 0 ; i < values.length ; ++i) {
+      assertTrue(d.getFieldValue(fieldNames[i]) instanceof Boolean);
+      assertEquals(values[i], d.getFieldValue(fieldNames[i]));
+    }
+  }
+
+  public void testParseAlternateSingleValuesBooleans() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNotNull(schema.getFieldOrNull("boolean1_b")); // should match dynamic field "*_b"
+    assertNotNull(schema.getFieldOrNull("boolean2_b")); // should match dynamic field "*_b"
+    boolean[] values      = { true, false };
+    String[] stringValues = { "yup", "nope" };
+    String[] fieldNames   = { "boolean1_b", "boolean2_b" };
+    SolrInputDocument d = doc(f("id", "59"));
+    for (int i = 0 ; i < values.length ; ++i) {
+      d.addField(fieldNames[i], stringValues[i]);
+    }
+    d = processAdd("parse-boolean-alternate-single-values-no-run-processor", d);
+    assertNotNull(d);
+
+    for (int i = 0 ; i < values.length ; ++i) {
+      assertTrue(d.getFieldValue(fieldNames[i]) instanceof Boolean);
+      assertEquals(values[i], d.getFieldValue(fieldNames[i]));
+    }
+
+    // Standard boolean values should not be mutated, since they're not configured
+    stringValues = new String[] { "true", "false" };
+    d = doc(f("id", "593"));
+    for (int i = 0 ; i < values.length ; ++i) {
+      d.addField(fieldNames[i], stringValues[i]);
+    }
+    d = processAdd("parse-boolean-alternate-single-values-no-run-processor", d);
+    assertNotNull(d);
+
+    for (int i = 0 ; i < values.length ; ++i) {
+      assertTrue(d.getFieldValue(fieldNames[i]) instanceof String);
+    }
+  }
+
+  public void testFailedParseMixedBoolean() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    assertNull(schema.getFieldOrNull("not_in_schema"));
+    Map<Object,Object> mixed = new HashMap<Object,Object>();
+    Long longVal = 294423L;
+    mixed.put(true, "true");
+    mixed.put(longVal, longVal); // Long-typed field value
+    mixed.put(false, "false");
+    mixed.put(true, "true");
+    SolrInputDocument d = processAdd("parse-boolean-no-run-processor",
+                                     doc(f("id", "7207"), f("not_in_schema", mixed.values())));
+    assertNotNull(d);
+    boolean foundLong = false;
+    for (Object o : d.getFieldValues("not_in_schema")) {
+      if (longVal == o) {
+        foundLong = true;
+      } else {
+        assertTrue(o instanceof String);
+      }
+      mixed.values().remove(o);
+    }
+    assertTrue(foundLong);
+    assertTrue(mixed.isEmpty());
+  }
+
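+  /*
+   * The "cascading-parsers-no-run-processor" chain is assumed to stack the
+   * parsing processors so that each one only converts a field when all of
+   * its values parse, leaving anything else for the next parser in line.
+   * A sketch of the presumed chain (not the literal test config):
+   *
+   *   <updateRequestProcessorChain name="cascading-parsers-no-run-processor">
+   *     <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+   *     <processor class="solr.ParseIntFieldUpdateProcessorFactory"/>
+   *     <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+   *     <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+   *     <processor class="solr.ParseDateFieldUpdateProcessorFactory"/>
+   *   </updateRequestProcessorChain>
+   *
+   * That ordering explains the cases below: small whole numbers become
+   * Integers, out-of-int-range values become Longs, mixed whole/fractional
+   * sets become Doubles, and fully unparseable mixes stay Strings.
+   */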
+  public void testCascadingParsers() throws Exception {
+    IndexSchema schema = h.getCore().getLatestSchema();
+    final String fieldName = "not_in_schema";
+    assertNull(schema.getFieldOrNull(fieldName));
+    SolrInputDocument d = null;
+    String chain = "cascading-parsers-no-run-processor";
+    
+    Map<Boolean,String> booleans = new HashMap<Boolean,String>();
+    booleans.put(true, "truE");
+    booleans.put(false, "False");
+    d = processAdd(chain, doc(f("id", "341"), f(fieldName, booleans.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Boolean);
+      booleans.remove(o);
+    }
+    assertTrue(booleans.isEmpty());
+
+    Map<Integer,String> ints = new HashMap<Integer,String>();
+    ints.put(2, "2");
+    ints.put(50928, "50928");
+    ints.put(86942008, "86,942,008");
+    d = processAdd(chain, doc(f("id", "333"), f(fieldName, ints.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Integer);
+      ints.remove(o);
+    }
+    assertTrue(ints.isEmpty());
+
+    Map<Long,String> longs = new HashMap<Long,String>();
+    longs.put(2L, "2");
+    longs.put(50928L, "50928");
+    longs.put(86942008987654L, "86,942,008,987,654");
+    d = processAdd(chain, doc(f("id", "342"), f(fieldName, longs.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Long);
+      longs.remove(o);
+    }
+    assertTrue(longs.isEmpty());
+    
+    /*
+    // Disabling this test because unlike Integer/Long, Float parsing can perform
+    // rounding to make values fit.  See 
+    Map<Float,String> floats = new HashMap<Float,String>();
+    floats.put(2.0f, "2.");
+    floats.put(509.28f, "509.28");
+    floats.put(86942.008f, "86,942.008");
+    d = processAdd(chain, doc(f("id", "342"), f(fieldName, floats.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Float);
+      floats.remove(o);
+    }
+    assertTrue(floats.isEmpty());
+    */
+
+    Map<Double,String> doubles = new HashMap<Double,String>();
+    doubles.put(2.0, "2.");
+    doubles.put(509.28, "509.28");
+    doubles.put(86942.008, "86,942.008");
+    d = processAdd(chain, doc(f("id", "342"), f(fieldName, doubles.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Double);
+      doubles.remove(o);
+    }
+    assertTrue(doubles.isEmpty());
+
+    DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
+    Map<Date,String> dates = new HashMap<Date,String>();
+    String[] dateStrings = { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
+    for (String dateString : dateStrings) {
+      dates.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
+    }
+    d = processAdd(chain, doc(f("id", "343"), f(fieldName, dates.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Date);
+      dates.remove(o);
+    }
+    assertTrue(dates.isEmpty());
+    
+    Map<Double,String> mixedLongsAndDoubles = new LinkedHashMap<Double,String>(); // preserve order
+    mixedLongsAndDoubles.put(85.0, "85");
+    mixedLongsAndDoubles.put(2.94423E-9, "2.94423E-9");
+    mixedLongsAndDoubles.put(2894518.0, "2,894,518");
+    mixedLongsAndDoubles.put(48794721.937, "48,794,721.937");
+    d = processAdd(chain, doc(f("id", "344"), f(fieldName, mixedLongsAndDoubles.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Double);
+      mixedLongsAndDoubles.remove(o);
+    }
+    assertTrue(mixedLongsAndDoubles.isEmpty());
+    
+    Set<String> mixed = new HashSet<String>();
+    mixed.add("true");
+    mixed.add("1682-07-22T18:33:00.000Z");
+    mixed.add("2,894,518");
+    mixed.add("308,393,131,379,900");
+    mixed.add("48,794,721.937");
+    d = processAdd(chain, doc(f("id", "345"), f(fieldName, mixed)));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof String);
+    }
+
+    Map<Double,Object> mixedDoubles = new LinkedHashMap<Double,Object>(); // preserve order
+    mixedDoubles.put(85.0, "85");
+    mixedDoubles.put(2.94423E-9, 2.94423E-9); // Double-typed field value
+    mixedDoubles.put(2894518.0, "2,894,518");
+    mixedDoubles.put(48794721.937, "48,794,721.937");
+    d = processAdd(chain, doc(f("id", "3391"), f(fieldName, mixedDoubles.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Double);
+      mixedDoubles.remove(o);
+    }
+    assertTrue(mixedDoubles.isEmpty());
+
+    Map<Integer,Object> mixedInts = new LinkedHashMap<Integer,Object>(); // preserve order
+    mixedInts.put(85, "85");
+    mixedInts.put(294423, 294423); // Integer-typed field value
+    mixedInts.put(-2894518, "-2,894,518");
+    mixedInts.put(1879472193, "1,879,472,193");
+    d = processAdd(chain, doc(f("id", "3392"), f(fieldName, mixedInts.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Integer);
+      mixedInts.remove(o);
+    }
+    assertTrue(mixedInts.isEmpty());
+
+    Map<Long,Object> mixedLongs = new LinkedHashMap<Long,Object>(); // preserve order
+    mixedLongs.put(85L, "85");
+    mixedLongs.put(42944233L, 42944233L); // Long-typed field value
+    mixedLongs.put(2894518L, "2,894,518");
+    mixedLongs.put(48794721937L, "48,794,721,937");
+    d = processAdd(chain, doc(f("id", "3393"), f(fieldName, mixedLongs.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Long);
+      mixedLongs.remove(o);
+    }
+    assertTrue(mixedLongs.isEmpty());
+
+    Map<Boolean,Object> mixedBooleans = new LinkedHashMap<Boolean,Object>(); // preserve order
+    mixedBooleans.put(true, "true");
+    mixedBooleans.put(false, false); // Boolean-typed field value
+    mixedBooleans.put(false, "false");
+    mixedBooleans.put(true, "true");
+    d = processAdd(chain, doc(f("id", "3394"), f(fieldName, mixedBooleans.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Boolean);
+      mixedBooleans.remove(o);
+    }
+    assertTrue(mixedBooleans.isEmpty());
+
+    dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
+    Map<Date,Object> mixedDates = new HashMap<Date,Object>();
+    dateStrings = new String[] { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
+    for (String dateString : dateStrings) {
+      mixedDates.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
+    }
+    Date extraDate = dateTimeFormatter.parseDateTime("2003-04-24").toDate();
+    mixedDates.put(extraDate, extraDate); // Date-typed field value
+    d = processAdd(chain, doc(f("id", "3395"), f(fieldName, mixedDates.values())));
+    assertNotNull(d);
+    for (Object o : d.getFieldValues(fieldName)) {
+      assertTrue(o instanceof Date);
+      mixedDates.remove(o);
+    }
+    assertTrue(mixedDates.isEmpty());
+  }
+}
diff --git a/solr/core/src/test/org/apache/solr/update/processor/RegexBoostProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/RegexBoostProcessorTest.java
index d757bd2..dac0ad0 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/RegexBoostProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/RegexBoostProcessorTest.java
@@ -38,6 +38,7 @@
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
     SolrCore core = h.getCore();
     _parser = new SolrRequestParsers( null );
diff --git a/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
index 7003ce0..ef7a8f3 100755
--- a/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
@@ -52,6 +52,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
index 30ecbde..78ac58a 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
@@ -43,6 +43,7 @@
 
   @BeforeClass
   public static void beforeClass() throws Exception {
+    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
     initCore("solrconfig.xml", "schema12.xml");
   }
 
diff --git a/solr/example/example-schemaless/solr/collection1/conf/currency.xml b/solr/example/example-schemaless/solr/collection1/conf/currency.xml
new file mode 100644
index 0000000..3a9c58a
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/currency.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Example exchange rates file for CurrencyField type named "currency" in example schema -->
+
+<currencyConfig version="1.0">
+  <rates>
+    <!-- Updated from http://www.exchangerate.com/ at 2011-09-27 -->
+    <rate from="USD" to="ARS" rate="4.333871" comment="ARGENTINA Peso" />
+    <rate from="USD" to="AUD" rate="1.025768" comment="AUSTRALIA Dollar" />
+    <rate from="USD" to="EUR" rate="0.743676" comment="European Euro" />
+    <rate from="USD" to="BRL" rate="1.881093" comment="BRAZIL Real" />
+    <rate from="USD" to="CAD" rate="1.030815" comment="CANADA Dollar" />
+    <rate from="USD" to="CLP" rate="519.0996" comment="CHILE Peso" />
+    <rate from="USD" to="CNY" rate="6.387310" comment="CHINA Yuan" />
+    <rate from="USD" to="CZK" rate="18.47134" comment="CZECH REP. Koruna" />
+    <rate from="USD" to="DKK" rate="5.515436" comment="DENMARK Krone" />
+    <rate from="USD" to="HKD" rate="7.801922" comment="HONG KONG Dollar" />
+    <rate from="USD" to="HUF" rate="215.6169" comment="HUNGARY Forint" />
+    <rate from="USD" to="ISK" rate="118.1280" comment="ICELAND Krona" />
+    <rate from="USD" to="INR" rate="49.49088" comment="INDIA Rupee" />
+    <rate from="USD" to="XDR" rate="0.641358" comment="INTNL MON. FUND SDR" />
+    <rate from="USD" to="ILS" rate="3.709739" comment="ISRAEL Sheqel" />
+    <rate from="USD" to="JPY" rate="76.32419" comment="JAPAN Yen" />
+    <rate from="USD" to="KRW" rate="1169.173" comment="KOREA (SOUTH) Won" />
+    <rate from="USD" to="KWD" rate="0.275142" comment="KUWAIT Dinar" />
+    <rate from="USD" to="MXN" rate="13.85895" comment="MEXICO Peso" />
+    <rate from="USD" to="NZD" rate="1.285159" comment="NEW ZEALAND Dollar" />
+    <rate from="USD" to="NOK" rate="5.859035" comment="NORWAY Krone" />
+    <rate from="USD" to="PKR" rate="87.57007" comment="PAKISTAN Rupee" />
+    <rate from="USD" to="PEN" rate="2.730683" comment="PERU Sol" />
+    <rate from="USD" to="PHP" rate="43.62039" comment="PHILIPPINES Peso" />
+    <rate from="USD" to="PLN" rate="3.310139" comment="POLAND Zloty" />
+    <rate from="USD" to="RON" rate="3.100932" comment="ROMANIA Leu" />
+    <rate from="USD" to="RUB" rate="32.14663" comment="RUSSIA Ruble" />
+    <rate from="USD" to="SAR" rate="3.750465" comment="SAUDI ARABIA Riyal" />
+    <rate from="USD" to="SGD" rate="1.299352" comment="SINGAPORE Dollar" />
+    <rate from="USD" to="ZAR" rate="8.329761" comment="SOUTH AFRICA Rand" />
+    <rate from="USD" to="SEK" rate="6.883442" comment="SWEDEN Krona" />
+    <rate from="USD" to="CHF" rate="0.906035" comment="SWITZERLAND Franc" />
+    <rate from="USD" to="TWD" rate="30.40283" comment="TAIWAN Dollar" />
+    <rate from="USD" to="THB" rate="30.89487" comment="THAILAND Baht" />
+    <rate from="USD" to="AED" rate="3.672955" comment="U.A.E. Dirham" />
+    <rate from="USD" to="UAH" rate="7.988582" comment="UKRAINE Hryvnia" />
+    <rate from="USD" to="GBP" rate="0.647910" comment="UNITED KINGDOM Pound" />
+    
+    <!-- Cross-rates for some common currencies -->
+    <rate from="EUR" to="GBP" rate="0.869914" />  
+    <rate from="EUR" to="NOK" rate="7.800095" />  
+    <rate from="GBP" to="NOK" rate="8.966508" />  
+  </rates>
+</currencyConfig>
diff --git a/solr/example/example-schemaless/solr/collection1/conf/elevate.xml b/solr/example/example-schemaless/solr/collection1/conf/elevate.xml
new file mode 100644
index 0000000..25d5ceb
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/elevate.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- If this file is found in the config directory, it will only be
+     loaded once at startup.  If it is found in Solr's data
+     directory, it will be re-loaded every commit.
+
+   See http://wiki.apache.org/solr/QueryElevationComponent for more info
+
+-->
+<elevate>
+ <query text="foo bar">
+  <doc id="1" />
+  <doc id="2" />
+  <doc id="3" />
+ </query>
+ 
+ <query text="ipod">
+   <doc id="MA147LL/A" />  <!-- put the actual ipod at the top -->
+   <doc id="IW-02" exclude="true" /> <!-- exclude this cable -->
+ </query>
+ 
+</elevate>
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ca.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ca.txt
new file mode 100644
index 0000000..307a85f
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ca.txt
@@ -0,0 +1,8 @@
+# Set of Catalan contractions for ElisionFilter
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+d
+l
+m
+n
+s
+t
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_fr.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_fr.txt
new file mode 100644
index 0000000..f1bba51
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_fr.txt
@@ -0,0 +1,15 @@
+# Set of French contractions for ElisionFilter
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+l
+m
+t
+qu
+n
+s
+j
+d
+c
+jusqu
+quoiqu
+lorsqu
+puisqu
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ga.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ga.txt
new file mode 100644
index 0000000..9ebe7fa
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_ga.txt
@@ -0,0 +1,5 @@
+# Set of Irish contractions for ElisionFilter
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+d
+m
+b
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_it.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_it.txt
new file mode 100644
index 0000000..cac0409
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/contractions_it.txt
@@ -0,0 +1,23 @@
+# Set of Italian contractions for ElisionFilter
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+c
+l 
+all 
+dall 
+dell 
+nell 
+sull 
+coll 
+pell 
+gl 
+agl 
+dagl 
+degl 
+negl 
+sugl 
+un 
+m 
+t 
+s 
+v 
+d
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/hyphenations_ga.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/hyphenations_ga.txt
new file mode 100644
index 0000000..4d2642c
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/hyphenations_ga.txt
@@ -0,0 +1,5 @@
+# Set of Irish hyphenations for StopFilter
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+h
+n
+t
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stemdict_nl.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stemdict_nl.txt
new file mode 100644
index 0000000..4410729
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stemdict_nl.txt
@@ -0,0 +1,6 @@
+# Set of overrides for the dutch stemmer
+# TODO: load this as a resource from the analyzer and sync it in build.xml
+fiets	fiets
+bromfiets	bromfiets
+ei	eier
+kind	kinder
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stoptags_ja.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stoptags_ja.txt
new file mode 100644
index 0000000..71b7508
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stoptags_ja.txt
@@ -0,0 +1,420 @@
+#
+# This file defines a Japanese stoptag set for JapanesePartOfSpeechStopFilter.
+#
+# Any token with a part-of-speech tag that exactly matches one defined in this
+# file is removed from the token stream.
+#
+# Set your own stoptags by uncommenting the lines below.  Note that comments are
+# not allowed on the same line as a stoptag.  See LUCENE-3745 for frequency lists,
+# etc. that can be useful for building your own stoptag set.
+#
+# The entire possible tagset is provided below for convenience.
+#
+#####
+#  noun: unclassified nouns
+#名詞
+#
+#  noun-common: Common nouns or nouns where the sub-classification is undefined
+#名詞-一般
+#
+#  noun-proper: Proper nouns where the sub-classification is undefined 
+#名詞-固有名詞
+#
+#  noun-proper-misc: miscellaneous proper nouns
+#名詞-固有名詞-一般
+#
+#  noun-proper-person: Personal names where the sub-classification is undefined
+#名詞-固有名詞-人名
+#
+#  noun-proper-person-misc: names that cannot be divided into surname and 
+#  given name; foreign names; names where the surname or given name is unknown.
+#  e.g. お市の方
+#名詞-固有名詞-人名-一般
+#
+#  noun-proper-person-surname: Mainly Japanese surnames.
+#  e.g. 山田
+#名詞-固有名詞-人名-姓
+#
+#  noun-proper-person-given_name: Mainly Japanese given names.
+#  e.g. 太郎
+#名詞-固有名詞-人名-名
+#
+#  noun-proper-organization: Names representing organizations.
+#  e.g. 通産省, NHK
+#名詞-固有名詞-組織
+#
+#  noun-proper-place: Place names where the sub-classification is undefined
+#名詞-固有名詞-地域
+#
+#  noun-proper-place-misc: Place names excluding countries.
+#  e.g. アジア, バルセロナ, 京都
+#名詞-固有名詞-地域-一般
+#
+#  noun-proper-place-country: Country names. 
+#  e.g. 日本, オーストラリア
+#名詞-固有名詞-地域-国
+#
+#  noun-pronoun: Pronouns where the sub-classification is undefined
+#名詞-代名詞
+#
+#  noun-pronoun-misc: miscellaneous pronouns: 
+#  e.g. それ, ここ, あいつ, あなた, あちこち, いくつ, どこか, なに, みなさん, みんな, わたくし, われわれ
+#名詞-代名詞-一般
+#
+#  noun-pronoun-contraction: Spoken language contraction made by combining a 
+#  pronoun and the particle 'wa'.
+#  e.g. ありゃ, こりゃ, こりゃあ, そりゃ, そりゃあ 
+#名詞-代名詞-縮約
+#
+#  noun-adverbial: Temporal nouns such as names of days or months that behave 
+#  like adverbs. Nouns that represent amount or ratios and can be used adverbially,
+#  e.g. 金曜, 一月, 午後, 少量
+#名詞-副詞可能
+#
+#  noun-verbal: Nouns that take arguments with case and can appear followed by 
+#  'suru' and related verbs (する, できる, なさる, くださる)
+#  e.g. インプット, 愛着, 悪化, 悪戦苦闘, 一安心, 下取り
+#名詞-サ変接続
+#
+#  noun-adjective-base: The base form of adjectives, words that appear before な ("na")
+#  e.g. 健康, 安易, 駄目, だめ
+#名詞-形容動詞語幹
+#
+#  noun-numeric: Arabic numbers, Chinese numerals, and counters like 何 (回), 数.
+#  e.g. 0, 1, 2, 何, 数, 幾
+#名詞-数
+#
+#  noun-affix: noun affixes where the sub-classification is undefined
+#名詞-非自立
+#
+#  noun-affix-misc: Of adnominalizers, the case-marker の ("no"), and words that 
+#  attach to the base form of inflectional words, words that cannot be classified 
+#  into any of the other categories below. This category includes indefinite nouns.
+#  e.g. あかつき, 暁, かい, 甲斐, 気, きらい, 嫌い, くせ, 癖, こと, 事, ごと, 毎, しだい, 次第, 
+#       順, せい, 所為, ついで, 序で, つもり, 積もり, 点, どころ, の, はず, 筈, はずみ, 弾み, 
+#       拍子, ふう, ふり, 振り, ほう, 方, 旨, もの, 物, 者, ゆえ, 故, ゆえん, 所以, わけ, 訳,
+#       わり, 割り, 割, ん-口語/, もん-口語/
+#名詞-非自立-一般
+#
+#  noun-affix-adverbial: noun affixes that can behave as adverbs.
+#  e.g. あいだ, 間, あげく, 挙げ句, あと, 後, 余り, 以外, 以降, 以後, 以上, 以前, 一方, うえ, 
+#       上, うち, 内, おり, 折り, かぎり, 限り, きり, っきり, 結果, ころ, 頃, さい, 際, 最中, さなか, 
+#       最中, じたい, 自体, たび, 度, ため, 為, つど, 都度, とおり, 通り, とき, 時, ところ, 所, 
+#       とたん, 途端, なか, 中, のち, 後, ばあい, 場合, 日, ぶん, 分, ほか, 他, まえ, 前, まま, 
+#       儘, 侭, みぎり, 矢先
+#名詞-非自立-副詞可能
+#
+#  noun-affix-aux: noun affixes treated as 助動詞 ("auxiliary verb") in school grammars 
+#  with the stem よう(だ) ("you(da)").
+#  e.g.  よう, やう, 様 (よう)
+#名詞-非自立-助動詞語幹
+#  
+#  noun-affix-adjective-base: noun affixes that can connect to the indeclinable
+#  connection form な (aux "da").
+#  e.g. みたい, ふう
+#名詞-非自立-形容動詞語幹
+#
+#  noun-special: special nouns where the sub-classification is undefined.
+#名詞-特殊
+#
+#  noun-special-aux: The そうだ ("souda") stem form that is used for reporting news
+#  is treated as 助動詞 ("auxiliary verb") in school grammars, and attaches to the
+#  base form of inflectional words.
+#  e.g. そう
+#名詞-特殊-助動詞語幹
+#
+#  noun-suffix: noun suffixes where the sub-classification is undefined.
+#名詞-接尾
+#
+#  noun-suffix-misc: Of the nouns or stem forms of other parts of speech that connect 
+#  to ガル or タイ and can combine into compound nouns, words that cannot be classified into
+#  any of the other categories below. In general, this category is more inclusive than 
+#  接尾語 ("suffix") and is usually the last element in a compound noun.
+#  e.g. おき, かた, 方, 甲斐 (がい), がかり, ぎみ, 気味, ぐるみ, (~した) さ, 次第, 済 (ず) み,
+#       よう, (でき)っこ, 感, 観, 性, 学, 類, 面, 用
+#名詞-接尾-一般
+#
+#  noun-suffix-person: Suffixes that form nouns and attach to person names more often
+#  than other nouns.
+#  e.g. 君, 様, 著
+#名詞-接尾-人名
+#
+#  noun-suffix-place: Suffixes that form nouns and attach to place names more often 
+#  than other nouns.
+#  e.g. 町, 市, 県
+#名詞-接尾-地域
+#
+#  noun-suffix-verbal: Of the suffixes that attach to nouns and form nouns, those that 
+#  can appear before スル ("suru").
+#  e.g. 化, 視, 分け, 入り, 落ち, 買い
+#名詞-接尾-サ変接続
+#
+#  noun-suffix-aux: The stem form of そうだ (様態) that is used to indicate conditions
+#  is treated as 助動詞 ("auxiliary verb") in school grammars, and attaches to the
+#  conjunctive form of inflectional words.
+#  e.g. そう
+#名詞-接尾-助動詞語幹
+#
+#  noun-suffix-adjective-base: Suffixes that attach to other nouns or the conjunctive 
+#  form of inflectional words and appear before the copula だ ("da").
+#  e.g. 的, げ, がち
+#名詞-接尾-形容動詞語幹
+#
+#  noun-suffix-adverbial: Suffixes that attach to other nouns and can behave as adverbs.
+#  e.g. 後 (ご), 以後, 以降, 以前, 前後, 中, 末, 上, 時 (じ)
+#名詞-接尾-副詞可能
+#
+#  noun-suffix-classifier: Suffixes that attach to numbers and form nouns. This category 
+#  is more inclusive than 助数詞 ("classifier") and includes common nouns that attach 
+#  to numbers.
+#  e.g. 個, つ, 本, 冊, パーセント, cm, kg, カ月, か国, 区画, 時間, 時半
+#名詞-接尾-助数詞
+#
+#  noun-suffix-special: Special suffixes that mainly attach to inflecting words.
+#  e.g. (楽し) さ, (考え) 方
+#名詞-接尾-特殊
+#
+#  noun-suffix-conjunctive: Nouns that behave like conjunctions and join two words 
+#  together.
+#  e.g. (日本) 対 (アメリカ), 対 (アメリカ), (3) 対 (5), (女優) 兼 (主婦)
+#名詞-接続詞的
+#
+#  noun-verbal_aux: Nouns that attach to the conjunctive particle て ("te") and are 
+#  semantically verb-like.
+#  e.g. ごらん, ご覧, 御覧, 頂戴
+#名詞-動詞非自立的
+#
+#  noun-quotation: text that cannot be segmented into words, proverbs, Chinese poetry, 
+#  dialects, English, etc. Currently, the only entry for 名詞 引用文字列 ("noun quotation") 
+#  is いわく ("iwaku").
+#名詞-引用文字列
+#
+#  noun-nai_adjective: Words that appear before the auxiliary verb ない ("nai") and
+#  behave like an adjective.
+#  e.g. 申し訳, 仕方, とんでも, 違い
+#名詞-ナイ形容詞語幹
+#
+#####
+#  prefix: unclassified prefixes
+#接頭詞
+#
+#  prefix-nominal: Prefixes that attach to nouns (including adjective stem forms) 
+#  excluding numerical expressions.
+#  e.g. お (水), 某 (氏), 同 (社), 故 (~氏), 高 (品質), お (見事), ご (立派)
+#接頭詞-名詞接続
+#
+#  prefix-verbal: Prefixes that attach to the imperative form of a verb or a verb
+#  in conjunctive form followed by なる/なさる/くださる.
+#  e.g. お (読みなさい), お (座り)
+#接頭詞-動詞接続
+#
+#  prefix-adjectival: Prefixes that attach to adjectives.
+#  e.g. お (寒いですねえ), バカ (でかい)
+#接頭詞-形容詞接続
+#
+#  prefix-numerical: Prefixes that attach to numerical expressions.
+#  e.g. 約, およそ, 毎時
+#接頭詞-数接続
+#
+#####
+#  verb: unclassified verbs
+#動詞
+#
+#  verb-main:
+#動詞-自立
+#
+#  verb-auxiliary:
+#動詞-非自立
+#
+#  verb-suffix:
+#動詞-接尾
+#
+#####
+#  adjective: unclassified adjectives
+#形容詞
+#
+#  adjective-main:
+#形容詞-自立
+#
+#  adjective-auxiliary:
+#形容詞-非自立
+#
+#  adjective-suffix:
+#形容詞-接尾
+#
+#####
+#  adverb: unclassified adverbs
+#副詞
+#
+#  adverb-misc: Words that can be segmented into one unit and where adnominal 
+#  modification is not possible.
+#  e.g. あいかわらず, 多分
+#副詞-一般
+#
+#  adverb-particle_conjunction: Adverbs that can be followed by の, は, に, 
+#  な, する, だ, etc.
+#  e.g. こんなに, そんなに, あんなに, なにか, なんでも
+#副詞-助詞類接続
+#
+#####
+#  adnominal: Words that only have noun-modifying forms.
+#  e.g. この, その, あの, どの, いわゆる, なんらかの, 何らかの, いろんな, こういう, そういう, ああいう, 
+#       どういう, こんな, そんな, あんな, どんな, 大きな, 小さな, おかしな, ほんの, たいした, 
+#       「(, も) さる (ことながら)」, 微々たる, 堂々たる, 単なる, いかなる, 我が」「同じ, 亡き
+#連体詞
+#
+#####
+#  conjunction: Conjunctions that can occur independently.
+#  e.g. が, けれども, そして, じゃあ, それどころか
+接続詞
+#
+#####
+#  particle: unclassified particles.
+助詞
+#
+#  particle-case: case particles where the subclassification is undefined.
+助詞-格助詞
+#
+#  particle-case-misc: Case particles.
+#  e.g. から, が, で, と, に, へ, より, を, の, にて
+助詞-格助詞-一般
+#
+#  particle-case-quote: the "to" that appears after nouns, a person’s speech, 
+#  quotation marks, expressions of decisions from a meeting, reasons, judgements,
+#  conjectures, etc.
+#  e.g. ( だ) と (述べた.), ( である) と (して執行猶予...)
+助詞-格助詞-引用
+#
+#  particle-case-compound: Compounds of particles and verbs that mainly behave 
+#  like case particles.
+#  e.g. という, といった, とかいう, として, とともに, と共に, でもって, にあたって, に当たって, に当って,
+#       にあたり, に当たり, に当り, に当たる, にあたる, において, に於いて,に於て, における, に於ける, 
+#       にかけ, にかけて, にかんし, に関し, にかんして, に関して, にかんする, に関する, に際し, 
+#       に際して, にしたがい, に従い, に従う, にしたがって, に従って, にたいし, に対し, にたいして, 
+#       に対して, にたいする, に対する, について, につき, につけ, につけて, につれ, につれて, にとって,
+#       にとり, にまつわる, によって, に依って, に因って, により, に依り, に因り, による, に依る, に因る, 
+#       にわたって, にわたる, をもって, を以って, を通じ, を通じて, を通して, をめぐって, をめぐり, をめぐる,
+#       って-口語/, ちゅう-関西弁「という」/, (何) ていう (人)-口語/, っていう-口語/, といふ, とかいふ
+助詞-格助詞-連語
+#
+#  particle-conjunctive:
+#  e.g. から, からには, が, けれど, けれども, けど, し, つつ, て, で, と, ところが, どころか, とも, ども, 
+#       ながら, なり, ので, のに, ば, ものの, や ( した), やいなや, (ころん) じゃ(いけない)-口語/, 
+#       (行っ) ちゃ(いけない)-口語/, (言っ) たって (しかたがない)-口語/, (それがなく)ったって (平気)-口語/
+助詞-接続助詞
+#
+#  particle-dependency:
+#  e.g. こそ, さえ, しか, すら, は, も, ぞ
+助詞-係助詞
+#
+#  particle-adverbial:
+#  e.g. がてら, かも, くらい, 位, ぐらい, しも, (学校) じゃ(これが流行っている)-口語/, 
+#       (それ)じゃあ (よくない)-口語/, ずつ, (私) なぞ, など, (私) なり (に), (先生) なんか (大嫌い)-口語/,
+#       (私) なんぞ, (先生) なんて (大嫌い)-口語/, のみ, だけ, (私) だって-口語/, だに, 
+#       (彼)ったら-口語/, (お茶) でも (いかが), 等 (とう), (今後) とも, ばかり, ばっか-口語/, ばっかり-口語/,
+#       ほど, 程, まで, 迄, (誰) も (が)([助詞-格助詞] および [助詞-係助詞] の前に位置する「も」)
+助詞-副助詞
+#
+#  particle-interjective: particles with interjective grammatical roles.
+#  e.g. (松島) や
+助詞-間投助詞
+#
+#  particle-coordinate:
+#  e.g. と, たり, だの, だり, とか, なり, や, やら
+助詞-並立助詞
+#
+#  particle-final:
+#  e.g. かい, かしら, さ, ぜ, (だ)っけ-口語/, (とまってる) で-方言/, な, ナ, なあ-口語/, ぞ, ね, ネ, 
+#       ねぇ-口語/, ねえ-口語/, ねん-方言/, の, のう-口語/, や, よ, ヨ, よぉ-口語/, わ, わい-口語/
+助詞-終助詞
+#
+#  particle-adverbial/conjunctive/final: The particle "ka" when unknown whether it is 
+#  adverbial, conjunctive, or sentence final. For example:
+#       (a) 「A か B か」. Ex:「(国内で運用する) か,(海外で運用する) か (.)」
+#       (b) Inside an adverb phrase. Ex:「(幸いという) か (, 死者はいなかった.)」
+#           「(祈りが届いたせい) か (, 試験に合格した.)」
+#       (c) 「かのように」. Ex:「(何もなかった) か (のように振る舞った.)」
+#  e.g. か
+助詞-副助詞/並立助詞/終助詞
+#
+#  particle-adnominalizer: The "no" that attaches to nouns and modifies 
+#  non-inflectional words.
+助詞-連体化
+#
+#  particle-adnominalizer: The "ni" and "to" that appear following nouns and adverbs 
+#  that are giongo, giseigo, or gitaigo.
+#  e.g. に, と
+助詞-副詞化
+#
+#  particle-special: A particle that does not fit into one of the above classifications. 
+#  This includes particles that are used in Tanka, Haiku, and other poetry.
+#  e.g. かな, けむ, ( しただろう) に, (あんた) にゃ(わからん), (俺) ん (家)
+助詞-特殊
+#
+#####
+#  auxiliary-verb:
+助動詞
+#
+#####
+#  interjection: Greetings and other exclamations.
+#  e.g. おはよう, おはようございます, こんにちは, こんばんは, ありがとう, どうもありがとう, ありがとうございます, 
+#       いただきます, ごちそうさま, さよなら, さようなら, はい, いいえ, ごめん, ごめんなさい
+#感動詞
+#
+#####
+#  symbol: unclassified Symbols.
+記号
+#
+#  symbol-misc: A general symbol not in one of the categories below.
+#  e.g. [○◎@$〒→+]
+記号-一般
+#
+#  symbol-comma: Commas
+#  e.g. [,、]
+記号-読点
+#
+#  symbol-period: Periods and full stops.
+#  e.g. [..。]
+記号-句点
+#
+#  symbol-space: Full-width whitespace.
+記号-空白
+#
+#  symbol-open_bracket:
+#  e.g. [({‘“『【]
+記号-括弧開
+#
+#  symbol-close_bracket:
+#  e.g. [)}’”』」】]
+記号-括弧閉
+#
+#  symbol-alphabetic:
+#記号-アルファベット
+#
+#####
+#  other: unclassified other
+#その他
+#
+#  other-interjection: Words that are hard to classify as noun-suffixes or 
+#  sentence-final particles.
+#  e.g. (だ)ァ
+その他-間投
+#
+#####
+#  filler: Aizuchi that occurs during a conversation or sounds inserted as filler.
+#  e.g. あの, うんと, えと
+フィラー
+#
+#####
+#  non-verbal: non-verbal sound.
+非言語音
+#
+#####
+#  fragment:
+#語断片
+#
+#####
+#  unknown: unknown part of speech.
+#未知語
+#
+##### End of file
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ar.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ar.txt
new file mode 100644
index 0000000..046829d
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ar.txt
@@ -0,0 +1,125 @@
+# This file was created by Jacques Savoy and is distributed under the BSD license.
+# See http://members.unine.ch/jacques.savoy/clef/index.html.
+# Also see http://www.opensource.org/licenses/bsd-license.html
+# Cleaned on October 11, 2009 (not normalized, so use before normalization)
+# This means that when modifying this list, you might need to add some 
+# redundant entries, for example containing forms with both أ and ا
+من
+ومن
+منها
+منه
+في
+وفي
+فيها
+فيه
+
+
+ثم
+او
+أو
+
+بها
+به
+
+
+اى
+اي
+أي
+أى
+لا
+ولا
+الا
+ألا
+إلا
+لكن
+ما
+وما
+كما
+فما
+عن
+مع
+اذا
+إذا
+ان
+أن
+إن
+انها
+أنها
+إنها
+انه
+أنه
+إنه
+بان
+بأن
+فان
+فأن
+وان
+وأن
+وإن
+التى
+التي
+الذى
+الذي
+الذين
+الى
+الي
+إلى
+إلي
+على
+عليها
+عليه
+اما
+أما
+إما
+ايضا
+أيضا
+كل
+وكل
+لم
+ولم
+لن
+ولن
+هى
+هي
+هو
+وهى
+وهي
+وهو
+فهى
+فهي
+فهو
+انت
+أنت
+لك
+لها
+له
+هذه
+هذا
+تلك
+ذلك
+هناك
+كانت
+كان
+يكون
+تكون
+وكانت
+وكان
+غير
+بعض
+قد
+نحو
+بين
+بينما
+منذ
+ضمن
+حيث
+الان
+الآن
+خلال
+بعد
+قبل
+حتى
+عند
+عندما
+لدى
+جميع
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_bg.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_bg.txt
new file mode 100644
index 0000000..1ae4ba2
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_bg.txt
@@ -0,0 +1,193 @@
+# This file was created by Jacques Savoy and is distributed under the BSD license.
+# See http://members.unine.ch/jacques.savoy/clef/index.html.
+# Also see http://www.opensource.org/licenses/bsd-license.html
+
+аз
+ако
+ала
+бе
+без
+беше
+би
+бил
+била
+били
+било
+близо
+бъдат
+бъде
+бяха
+
+вас
+ваш
+ваша
+вероятно
+вече
+взема
+ви
+вие
+винаги
+все
+всеки
+всички
+всичко
+всяка
+във
+въпреки
+върху
+
+ги
+главно
+го
+
+да
+дали
+до
+докато
+докога
+дори
+досега
+доста
+
+едва
+един
+ето
+за
+зад
+заедно
+заради
+засега
+затова
+защо
+защото
+
+из
+или
+им
+има
+имат
+иска
+
+каза
+как
+каква
+какво
+както
+какъв
+като
+кога
+когато
+което
+които
+кой
+който
+колко
+която
+къде
+където
+към
+ли
+
+ме
+между
+мен
+ми
+мнозина
+мога
+могат
+може
+моля
+момента
+му
+
+на
+над
+назад
+най
+направи
+напред
+например
+нас
+не
+него
+нея
+ни
+ние
+никой
+нито
+но
+някои
+някой
+няма
+обаче
+около
+освен
+особено
+от
+отгоре
+отново
+още
+пак
+по
+повече
+повечето
+под
+поне
+поради
+после
+почти
+прави
+пред
+преди
+през
+при
+пък
+първо
+
+са
+само
+се
+сега
+си
+скоро
+след
+сме
+според
+сред
+срещу
+сте
+съм
+със
+също
+
+тази
+така
+такива
+такъв
+там
+твой
+те
+тези
+ти
+тн
+то
+това
+тогава
+този
+той
+толкова
+точно
+трябва
+тук
+тъй
+тя
+тях
+
+харесва
+
+че
+често
+чрез
+ще
+щом
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ca.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ca.txt
new file mode 100644
index 0000000..3da65de
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ca.txt
@@ -0,0 +1,220 @@
+# Catalan stopwords from http://github.com/vcl/cue.language (Apache 2 Licensed)
+a
+abans
+ací
+ah
+així
+això
+al
+als
+aleshores
+algun
+alguna
+algunes
+alguns
+alhora
+allà
+allí
+allò
+altra
+altre
+altres
+amb
+ambdós
+ambdues
+apa
+aquell
+aquella
+aquelles
+aquells
+aquest
+aquesta
+aquestes
+aquests
+aquí
+baix
+cada
+cadascú
+cadascuna
+cadascunes
+cadascuns
+com
+contra
+d'un
+d'una
+d'unes
+d'uns
+dalt
+de
+del
+dels
+des
+després
+dins
+dintre
+donat
+doncs
+durant
+e
+eh
+el
+els
+em
+en
+encara
+ens
+entre
+érem
+eren
+éreu
+es
+és
+esta
+està
+estàvem
+estaven
+estàveu
+esteu
+et
+etc
+ets
+fins
+fora
+gairebé
+ha
+han
+has
+havia
+he
+hem
+heu
+hi 
+ho
+i
+igual
+iguals
+ja
+l'hi
+la
+les
+li
+li'n
+llavors
+m'he
+ma
+mal
+malgrat
+mateix
+mateixa
+mateixes
+mateixos
+me
+mentre
+més
+meu
+meus
+meva
+meves
+molt
+molta
+moltes
+molts
+mon
+mons
+n'he
+n'hi
+ne
+ni
+no
+nogensmenys
+només
+nosaltres
+nostra
+nostre
+nostres
+o
+oh
+oi
+on
+pas
+pel
+pels
+per
+però
+perquè
+poc 
+poca
+pocs
+poques
+potser
+propi
+qual
+quals
+quan
+quant 
+que
+què
+quelcom
+qui
+quin
+quina
+quines
+quins
+s'ha
+s'han
+sa
+semblant
+semblants
+ses
+seu 
+seus
+seva
+seva
+seves
+si
+sobre
+sobretot
+sóc
+solament
+sols
+son 
+són
+sons 
+sota
+sou
+t'ha
+t'han
+t'he
+ta
+tal
+també
+tampoc
+tan
+tant
+tanta
+tantes
+teu
+teus
+teva
+teves
+ton
+tons
+tot
+tota
+totes
+tots
+un
+una
+unes
+uns
+us
+va
+vaig
+vam
+van
+vas
+veu
+vosaltres
+vostra
+vostre
+vostres
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_cz.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_cz.txt
new file mode 100644
index 0000000..53c6097
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_cz.txt
@@ -0,0 +1,172 @@
+a
+s
+k
+o
+i
+u
+v
+z
+dnes
+cz
+tímto
+budeš
+budem
+byli
+jseš
+můj
+svým
+ta
+tomto
+tohle
+tuto
+tyto
+jej
+zda
+proč
+máte
+tato
+kam
+tohoto
+kdo
+kteří
+mi
+nám
+tom
+tomuto
+mít
+nic
+proto
+kterou
+byla
+toho
+protože
+asi
+ho
+naši
+napište
+re
+což
+tím
+takže
+svých
+její
+svými
+jste
+aj
+tu
+tedy
+teto
+bylo
+kde
+ke
+pravé
+ji
+nad
+nejsou
+či
+pod
+téma
+mezi
+přes
+ty
+pak
+vám
+ani
+když
+však
+neg
+jsem
+tento
+článku
+články
+aby
+jsme
+před
+pta
+jejich
+byl
+ještě
+až
+bez
+také
+pouze
+první
+vaše
+která
+nás
+nový
+tipy
+pokud
+může
+strana
+jeho
+své
+jiné
+zprávy
+nové
+není
+vás
+jen
+podle
+zde
+už
+být
+více
+bude
+již
+než
+který
+by
+které
+co
+nebo
+ten
+tak
+má
+při
+od
+po
+jsou
+jak
+další
+ale
+si
+se
+ve
+to
+jako
+za
+zpět
+ze
+do
+pro
+je
+na
+atd
+atp
+jakmile
+přičemž
+já
+on
+ona
+ono
+oni
+ony
+my
+vy
+jí
+ji
+mě
+mne
+jemu
+tomu
+těm
+těmu
+němu
+němuž
+jehož
+jíž
+jelikož
+jež
+jakož
+načež
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_da.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_da.txt
new file mode 100644
index 0000000..a3ff5fe
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_da.txt
@@ -0,0 +1,108 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/danish/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Danish stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ | This is a ranked list (commonest to rarest) of stopwords derived from
+ | a large text sample.
+
+
+og           | and
+i            | in
+jeg          | I
+det          | that (dem. pronoun)/it (pers. pronoun)
+at           | that (in front of a sentence)/to (with infinitive)
+en           | a/an
+den          | it (pers. pronoun)/that (dem. pronoun)
+til          | to/at/for/until/against/by/of/into, more
+er           | present tense of "to be"
+som          | who, as
+på           | on/upon/in/on/at/to/after/of/with/for, on
+de           | they
+med          | with/by/in, along
+han          | he
+af           | of/by/from/off/for/in/with/on, off
+for          | at/for/to/from/by/of/ago, in front/before, because
+ikke         | not
+der          | who/which, there/those
+var          | past tense of "to be"
+mig          | me/myself
+sig          | oneself/himself/herself/itself/themselves
+men          | but
+et           | a/an/one, one (number), someone/somebody/one
+har          | present tense of "to have"
+om           | round/about/for/in/a, about/around/down, if
+vi           | we
+min          | my
+havde        | past tense of "to have"
+ham          | him
+hun          | she
+nu           | now
+over         | over/above/across/by/beyond/past/on/about, over/past
+da           | then, when/as/since
+fra          | from/off/since, off, since
+du           | you
+ud           | out
+sin          | his/her/its/one's
+dem          | them
+os           | us/ourselves
+op           | up
+man          | you/one
+hans         | his
+hvor         | where
+eller        | or
+hvad         | what
+skal         | must/shall etc.
+selv         | myself/yourself/herself/ourselves etc., even
+her          | here
+alle         | all/everyone/everybody etc.
+vil          | will (verb)
+blev         | past tense of "to stay/to remain/to get/to become"
+kunne        | could
+ind          | in
+når          | when
+være         | present tense of "to be"
+dog          | however/yet/after all
+noget        | something
+ville        | would
+jo           | you know/you see (adv), yes
+deres        | their/theirs
+efter        | after/behind/according to/for/by/from, later/afterwards
+ned          | down
+skulle       | should
+denne        | this
+end          | than
+dette        | this
+mit          | my/mine
+også         | also
+under        | under/beneath/below/during, below/underneath
+have         | have
+dig          | you
+anden        | other
+hende        | her
+mine         | my
+alt          | everything
+meget        | much/very, plenty of
+sit          | his, her, its, one's
+sine         | his, her, its, one's
+vor          | our
+mod          | against
+disse        | these
+hvis         | if
+din          | your/yours
+nogle        | some
+hos          | by/at
+blive        | be/become
+mange        | many
+ad           | by/through
+bliver       | present tense of "to be/to become"
+hendes       | her/hers
+været        | be
+thi          | for (conj)
+jer          | you
+sådan        | such, like this/like that
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_de.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_de.txt
new file mode 100644
index 0000000..f770384
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_de.txt
@@ -0,0 +1,292 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/german/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A German stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ | The number of forms in this list is reduced significantly by passing it
+ | through the German stemmer.
+
+
+aber           |  but
+
+alle           |  all
+allem
+allen
+aller
+alles
+
+als            |  than, as
+also           |  so
+am             |  an + dem
+an             |  at
+
+ander          |  other
+andere
+anderem
+anderen
+anderer
+anderes
+anderm
+andern
+anderr
+anders
+
+auch           |  also
+auf            |  on
+aus            |  out of
+bei            |  by
+bin            |  am
+bis            |  until
+bist           |  art
+da             |  there
+damit          |  with it
+dann           |  then
+
+der            |  the
+den
+des
+dem
+die
+das
+
+daß            |  that
+
+derselbe       |  the same
+derselben
+denselben
+desselben
+demselben
+dieselbe
+dieselben
+dasselbe
+
+dazu           |  to that
+
+dein           |  thy
+deine
+deinem
+deinen
+deiner
+deines
+
+denn           |  because
+
+derer          |  of those
+dessen         |  of him
+
+dich           |  thee
+dir            |  to thee
+du             |  thou
+
+dies           |  this
+diese
+diesem
+diesen
+dieser
+dieses
+
+
+doch           |  (several meanings)
+dort           |  (over) there
+
+
+durch          |  through
+
+ein            |  a
+eine
+einem
+einen
+einer
+eines
+
+einig          |  some
+einige
+einigem
+einigen
+einiger
+einiges
+
+einmal         |  once
+
+er             |  he
+ihn            |  him
+ihm            |  to him
+
+es             |  it
+etwas          |  something
+
+euer           |  your
+eure
+eurem
+euren
+eurer
+eures
+
+für            |  for
+gegen          |  towards
+gewesen        |  p.p. of sein
+hab            |  have
+habe           |  have
+haben          |  have
+hat            |  has
+hatte          |  had
+hatten         |  had
+hier           |  here
+hin            |  there
+hinter         |  behind
+
+ich            |  I
+mich           |  me
+mir            |  to me
+
+
+ihr            |  you, to her
+ihre
+ihrem
+ihren
+ihrer
+ihres
+euch           |  to you
+
+im             |  in + dem
+in             |  in
+indem          |  while
+ins            |  in + das
+ist            |  is
+
+jede           |  each, every
+jedem
+jeden
+jeder
+jedes
+
+jene           |  that
+jenem
+jenen
+jener
+jenes
+
+jetzt          |  now
+kann           |  can
+
+kein           |  no
+keine
+keinem
+keinen
+keiner
+keines
+
+können         |  can
+könnte         |  could
+machen         |  do
+man            |  one
+
+manche         |  some, many a
+manchem
+manchen
+mancher
+manches
+
+mein           |  my
+meine
+meinem
+meinen
+meiner
+meines
+
+mit            |  with
+muss           |  must
+musste         |  had to
+nach           |  to(wards)
+nicht          |  not
+nichts         |  nothing
+noch           |  still, yet
+nun            |  now
+nur            |  only
+ob             |  whether
+oder           |  or
+ohne           |  without
+sehr           |  very
+
+sein           |  his
+seine
+seinem
+seinen
+seiner
+seines
+
+selbst         |  self
+sich           |  oneself/himself/herself
+
+sie            |  they, she
+ihnen          |  to them
+
+sind           |  are
+so             |  so
+
+solche         |  such
+solchem
+solchen
+solcher
+solches
+
+soll           |  shall
+sollte         |  should
+sondern        |  but
+sonst          |  else
+über           |  over
+um             |  about, around
+und            |  and
+
+uns            |  us
+unse
+unsem
+unsen
+unser
+unses
+
+unter          |  under
+viel           |  much
+vom            |  von + dem
+von            |  from
+vor            |  before
+während        |  while
+war            |  was
+waren          |  were
+warst          |  wast
+was            |  what
+weg            |  away, off
+weil           |  because
+weiter         |  further
+
+welche         |  which
+welchem
+welchen
+welcher
+welches
+
+wenn           |  when
+werde          |  will
+werden         |  will
+wie            |  how
+wieder         |  again
+will           |  want
+wir            |  we
+wird           |  will
+wirst          |  will (2nd person sing.)
+wo             |  where
+wollen         |  want
+wollte         |  wanted
+würde          |  would
+würden         |  would
+zu             |  to
+zum            |  zu + dem
+zur            |  zu + der
+zwar           |  indeed
+zwischen       |  between
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_el.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_el.txt
new file mode 100644
index 0000000..232681f
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_el.txt
@@ -0,0 +1,78 @@
+# Lucene Greek Stopwords list
+# Note: by default this file is used after GreekLowerCaseFilter,
+# so when modifying this file use 'σ' instead of 'ς' 
+ο
+η
+το
+οι
+τα
+του
+τησ
+των
+τον
+την
+και 
+κι
+κ
+ειμαι
+εισαι
+ειναι
+ειμαστε
+ειστε
+στο
+στον
+στη
+στην
+μα
+αλλα
+απο
+για
+προσ
+με
+σε
+ωσ
+παρα
+αντι
+κατα
+μετα
+θα
+να
+δε
+δεν
+μη
+μην
+επι
+ενω
+εαν
+αν
+τοτε
+που
+πωσ
+ποιοσ
+ποια
+ποιο
+ποιοι
+ποιεσ
+ποιων
+ποιουσ
+αυτοσ
+αυτη
+αυτο
+αυτοι
+αυτων
+αυτουσ
+αυτεσ
+αυτα
+εκεινοσ
+εκεινη
+εκεινο
+εκεινοι
+εκεινεσ
+εκεινα
+εκεινων
+εκεινουσ
+οπωσ
+ομωσ
+ισωσ
+οσο
+οτι
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_en.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_en.txt
new file mode 100644
index 0000000..2c164c0
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_en.txt
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# a couple of test stopwords to test that the words are really being
+# configured from this file:
+stopworda
+stopwordb
+
+# Standard english stop words taken from Lucene's StopAnalyzer
+a
+an
+and
+are
+as
+at
+be
+but
+by
+for
+if
+in
+into
+is
+it
+no
+not
+of
+on
+or
+such
+that
+the
+their
+then
+there
+these
+they
+this
+to
+was
+will
+with
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_es.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_es.txt
new file mode 100644
index 0000000..2db1476
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_es.txt
@@ -0,0 +1,354 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/spanish/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Spanish stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+
+ | The following is a ranked list (commonest to rarest) of stopwords
+ | deriving from a large sample of text.
+
+ | Extra words have been added at the end.
+
+de             |  from, of
+la             |  the, her
+que            |  who, that
+el             |  the
+en             |  in
+y              |  and
+a              |  to
+los            |  the, them
+del            |  de + el
+se             |  himself, from him etc
+las            |  the, them
+por            |  for, by, etc
+un             |  a
+para           |  for
+con            |  with
+no             |  no
+una            |  a
+su             |  his, her
+al             |  a + el
+  | es         from SER
+lo             |  him
+como           |  how
+más            |  more
+pero           |  but
+sus            |  su plural
+le             |  to him, her
+ya             |  already
+o              |  or
+  | fue        from SER
+este           |  this
+  | ha         from HABER
+sí             |  himself etc
+porque         |  because
+esta           |  this
+  | son        from SER
+entre          |  between
+  | está     from ESTAR
+cuando         |  when
+muy            |  very
+sin            |  without
+sobre          |  on
+  | ser        from SER
+  | tiene      from TENER
+también        |  also
+me             |  me
+hasta          |  until
+hay            |  there is/are
+donde          |  where
+  | han        from HABER
+quien          |  whom, that
+  | están      from ESTAR
+  | estado     from ESTAR
+desde          |  from
+todo           |  all
+nos            |  us
+durante        |  during
+  | estados    from ESTAR
+todos          |  all
+uno            |  a
+les            |  to them
+ni             |  nor
+contra         |  against
+otros          |  other
+  | fueron     from SER
+ese            |  that
+eso            |  that
+  | había      from HABER
+ante           |  before
+ellos          |  they
+e              |  and (variant of y)
+esto           |  this
+mí             |  me
+antes          |  before
+algunos        |  some
+qué            |  what?
+unos           |  a
+yo             |  I
+otro           |  other
+otras          |  other
+otra           |  other
+él             |  he
+tanto          |  so much, many
+esa            |  that
+estos          |  these
+mucho          |  much, many
+quienes        |  who
+nada           |  nothing
+muchos         |  many
+cual           |  who
+  | sea        from SER
+poco           |  few
+ella           |  she
+estar          |  to be
+  | haber      from HABER
+estas          |  these
+  | estaba     from ESTAR
+  | estamos    from ESTAR
+algunas        |  some
+algo           |  something
+nosotros       |  we
+
+      | other forms
+
+mi             |  me
+mis            |  mi plural
+tú             |  thou
+te             |  thee
+ti             |  thee
+tu             |  thy
+tus            |  tu plural
+ellas          |  they
+nosotras       |  we
+vosotros       |  you
+vosotras       |  you
+os             |  you
+mío            |  mine
+mía            |
+míos           |
+mías           |
+tuyo           |  thine
+tuya           |
+tuyos          |
+tuyas          |
+suyo           |  his, hers, theirs
+suya           |
+suyos          |
+suyas          |
+nuestro        |  ours
+nuestra        |
+nuestros       |
+nuestras       |
+vuestro        |  yours
+vuestra        |
+vuestros       |
+vuestras       |
+esos           |  those
+esas           |  those
+
+               | forms of estar, to be (not including the infinitive):
+estoy
+estás
+está
+estamos
+estáis
+están
+esté
+estés
+estemos
+estéis
+estén
+estaré
+estarás
+estará
+estaremos
+estaréis
+estarán
+estaría
+estarías
+estaríamos
+estaríais
+estarían
+estaba
+estabas
+estábamos
+estabais
+estaban
+estuve
+estuviste
+estuvo
+estuvimos
+estuvisteis
+estuvieron
+estuviera
+estuvieras
+estuviéramos
+estuvierais
+estuvieran
+estuviese
+estuvieses
+estuviésemos
+estuvieseis
+estuviesen
+estando
+estado
+estada
+estados
+estadas
+estad
+
+               | forms of haber, to have (not including the infinitive):
+he
+has
+ha
+hemos
+habéis
+han
+haya
+hayas
+hayamos
+hayáis
+hayan
+habré
+habrás
+habrá
+habremos
+habréis
+habrán
+habría
+habrías
+habríamos
+habríais
+habrían
+había
+habías
+habíamos
+habíais
+habían
+hube
+hubiste
+hubo
+hubimos
+hubisteis
+hubieron
+hubiera
+hubieras
+hubiéramos
+hubierais
+hubieran
+hubiese
+hubieses
+hubiésemos
+hubieseis
+hubiesen
+habiendo
+habido
+habida
+habidos
+habidas
+
+               | forms of ser, to be (not including the infinitive):
+soy
+eres
+es
+somos
+sois
+son
+sea
+seas
+seamos
+seáis
+sean
+seré
+serás
+será
+seremos
+seréis
+serán
+sería
+serías
+seríamos
+seríais
+serían
+era
+eras
+éramos
+erais
+eran
+fui
+fuiste
+fue
+fuimos
+fuisteis
+fueron
+fuera
+fueras
+fuéramos
+fuerais
+fueran
+fuese
+fueses
+fuésemos
+fueseis
+fuesen
+siendo
+sido
+  |  sed also means 'thirst'
+
+               | forms of tener, to have (not including the infinitive):
+tengo
+tienes
+tiene
+tenemos
+tenéis
+tienen
+tenga
+tengas
+tengamos
+tengáis
+tengan
+tendré
+tendrás
+tendrá
+tendremos
+tendréis
+tendrán
+tendría
+tendrías
+tendríamos
+tendríais
+tendrían
+tenía
+tenías
+teníamos
+teníais
+tenían
+tuve
+tuviste
+tuvo
+tuvimos
+tuvisteis
+tuvieron
+tuviera
+tuvieras
+tuviéramos
+tuvierais
+tuvieran
+tuviese
+tuvieses
+tuviésemos
+tuvieseis
+tuviesen
+teniendo
+tenido
+tenida
+tenidos
+tenidas
+tened
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_eu.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_eu.txt
new file mode 100644
index 0000000..25f1db9
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_eu.txt
@@ -0,0 +1,99 @@
+# example set of Basque stopwords
+al
+anitz
+arabera
+asko
+baina
+bat
+batean
+batek
+bati
+batzuei
+batzuek
+batzuetan
+batzuk
+bera
+beraiek
+berau
+berauek
+bere
+berori
+beroriek
+beste
+bezala
+da
+dago
+dira
+ditu
+du
+dute
+edo
+egin
+ere
+eta
+eurak
+ez
+gainera
+gu
+gutxi
+guzti
+haiei
+haiek
+haietan
+hainbeste
+hala
+han
+handik
+hango
+hara
+hari
+hark
+hartan
+hau
+hauei
+hauek
+hauetan
+hemen
+hemendik
+hemengo
+hi
+hona
+honek
+honela
+honetan
+honi
+hor
+hori
+horiei
+horiek
+horietan
+horko
+horra
+horrek
+horrela
+horretan
+horri
+hortik
+hura
+izan
+ni
+noiz
+nola
+non
+nondik
+nongo
+nor
+nora
+ze
+zein
+zen
+zenbait
+zenbat
+zer
+zergatik
+ziren
+zituen
+zu
+zuek
+zuen
+zuten
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fa.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fa.txt
new file mode 100644
index 0000000..723641c
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fa.txt
@@ -0,0 +1,313 @@
+# This file was created by Jacques Savoy and is distributed under the BSD license.
+# See http://members.unine.ch/jacques.savoy/clef/index.html.
+# Also see http://www.opensource.org/licenses/bsd-license.html
+# Note: by default this file is used after normalization, so when adding entries
+# to this file, use the arabic 'ي' instead of 'ی'
+انان
+نداشته
+سراسر
+خياه
+ايشان
+وي
+تاكنون
+بيشتري
+دوم
+پس
+ناشي
+وگو
+يا
+داشتند
+سپس
+هنگام
+هرگز
+پنج
+نشان
+امسال
+ديگر
+گروهي
+شدند
+چطور
+ده
+و
+دو
+نخستين
+ولي
+چرا
+چه
+وسط
+ه
+كدام
+قابل
+يك
+رفت
+هفت
+همچنين
+در
+هزار
+بله
+بلي
+شايد
+اما
+شناسي
+گرفته
+دهد
+داشته
+دانست
+داشتن
+خواهيم
+ميليارد
+وقتيكه
+امد
+خواهد
+جز
+اورده
+شده
+بلكه
+خدمات
+شدن
+برخي
+نبود
+بسياري
+جلوگيري
+حق
+كردند
+نوعي
+بعري
+نكرده
+نظير
+نبايد
+بوده
+بودن
+داد
+اورد
+هست
+جايي
+شود
+دنبال
+داده
+بايد
+سابق
+هيچ
+همان
+انجا
+كمتر
+كجاست
+گردد
+كسي
+تر
+مردم
+تان
+دادن
+بودند
+سري
+جدا
+ندارند
+مگر
+يكديگر
+دارد
+دهند
+بنابراين
+هنگامي
+سمت
+جا
+انچه
+خود
+دادند
+زياد
+دارند
+اثر
+بدون
+بهترين
+بيشتر
+البته
+به
+براساس
+بيرون
+كرد
+بعضي
+گرفت
+توي
+اي
+ميليون
+او
+جريان
+تول
+بر
+مانند
+برابر
+باشيم
+مدتي
+گويند
+اكنون
+تا
+تنها
+جديد
+چند
+بي
+نشده
+كردن
+كردم
+گويد
+كرده
+كنيم
+نمي
+نزد
+روي
+قصد
+فقط
+بالاي
+ديگران
+اين
+ديروز
+توسط
+سوم
+ايم
+دانند
+سوي
+استفاده
+شما
+كنار
+داريم
+ساخته
+طور
+امده
+رفته
+نخست
+بيست
+نزديك
+طي
+كنيد
+از
+انها
+تمامي
+داشت
+يكي
+طريق
+اش
+چيست
+روب
+نمايد
+گفت
+چندين
+چيزي
+تواند
+ام
+ايا
+با
+ان
+ايد
+ترين
+اينكه
+ديگري
+راه
+هايي
+بروز
+همچنان
+پاعين
+كس
+حدود
+مختلف
+مقابل
+چيز
+گيرد
+ندارد
+ضد
+همچون
+سازي
+شان
+مورد
+باره
+مرسي
+خويش
+برخوردار
+چون
+خارج
+شش
+هنوز
+تحت
+ضمن
+هستيم
+گفته
+فكر
+بسيار
+پيش
+براي
+روزهاي
+انكه
+نخواهد
+بالا
+كل
+وقتي
+كي
+چنين
+كه
+گيري
+نيست
+است
+كجا
+كند
+نيز
+يابد
+بندي
+حتي
+توانند
+عقب
+خواست
+كنند
+بين
+تمام
+همه
+ما
+باشند
+مثل
+شد
+اري
+باشد
+اره
+طبق
+بعد
+اگر
+صورت
+غير
+جاي
+بيش
+ريزي
+اند
+زيرا
+چگونه
+بار
+لطفا
+مي
+درباره
+من
+ديده
+همين
+گذاري
+برداري
+علت
+گذاشته
+هم
+فوق
+نه
+ها
+شوند
+اباد
+همواره
+هر
+اول
+خواهند
+چهار
+نام
+امروز
+مان
+هاي
+قبل
+كنم
+سعي
+تازه
+را
+هستند
+زير
+جلوي
+عنوان
+بود
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fi.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fi.txt
new file mode 100644
index 0000000..addad79
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fi.txt
@@ -0,0 +1,95 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/finnish/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+ 
+| forms of BE
+
+olla
+olen
+olet
+on
+olemme
+olette
+ovat
+ole        | negative form
+
+oli
+olisi
+olisit
+olisin
+olisimme
+olisitte
+olisivat
+olit
+olin
+olimme
+olitte
+olivat
+ollut
+olleet
+
+en         | negation
+et
+ei
+emme
+ette
+eivät
+
+|Nom   Gen    Acc    Part   Iness   Elat    Illat  Adess   Ablat   Allat   Ess    Trans
+minä   minun  minut  minua  minussa minusta minuun minulla minulta minulle               | I
+sinä   sinun  sinut  sinua  sinussa sinusta sinuun sinulla sinulta sinulle               | you
+hän    hänen  hänet  häntä  hänessä hänestä häneen hänellä häneltä hänelle               | he she
+me     meidän meidät meitä  meissä  meistä  meihin meillä  meiltä  meille                | we
+te     teidän teidät teitä  teissä  teistä  teihin teillä  teiltä  teille                | you
+he     heidän heidät heitä  heissä  heistä  heihin heillä  heiltä  heille                | they
+
+tämä   tämän         tätä   tässä   tästä   tähän  tallä   tältä   tälle   tänä   täksi  | this
+tuo    tuon          tuotä  tuossa  tuosta  tuohon tuolla  tuolta  tuolle  tuona  tuoksi | that
+se     sen           sitä   siinä   siitä   siihen sillä   siltä   sille   sinä   siksi  | it
+nämä   näiden        näitä  näissä  näistä  näihin näillä  näiltä  näille  näinä  näiksi | these
+nuo    noiden        noita  noissa  noista  noihin noilla  noilta  noille  noina  noiksi | those
+ne     niiden        niitä  niissä  niistä  niihin niillä  niiltä  niille  niinä  niiksi | they
+
+kuka   kenen kenet   ketä   kenessä kenestä keneen kenellä keneltä kenelle kenenä keneksi| who
+ketkä  keiden ketkä  keitä  keissä  keistä  keihin keillä  keiltä  keille  keinä  keiksi | (pl)
+mikä   minkä minkä   mitä   missä   mistä   mihin  millä   miltä   mille   minä   miksi  | which what
+mitkä                                                                                    | (pl)
+
+joka   jonka         jota   jossa   josta   johon  jolla   jolta   jolle   jona   joksi  | who which
+jotka  joiden        joita  joissa  joista  joihin joilla  joilta  joille  joina  joiksi | (pl)
+
+| conjunctions
+
+että   | that
+ja     | and
+jos    | if
+koska  | because
+kuin   | than
+mutta  | but
+niin   | so
+sekä   | and
+sillä  | for
+tai    | or
+vaan   | but
+vai    | or
+vaikka | although
+
+
+| prepositions
+
+kanssa  | with
+mukaan  | according to
+noin    | about
+poikki  | across
+yli     | over, across
+
+| other
+
+kun    | when
+niin   | so
+nyt    | now
+itse   | self
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fr.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fr.txt
new file mode 100644
index 0000000..20d12cb
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_fr.txt
@@ -0,0 +1,184 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/french/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A French stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+au             |  a + le
+aux            |  a + les
+avec           |  with
+ce             |  this
+ces            |  these
+dans           |  in
+de             |  of
+des            |  de + les
+du             |  de + le
+elle           |  she
+en             |  `of them' etc
+et             |  and
+eux            |  them
+il             |  he
+je             |  I
+la             |  the
+le             |  the
+leur           |  their
+lui            |  him
+ma             |  my (fem)
+mais           |  but
+me             |  me
+même           |  same; as in moi-même (myself) etc
+mes            |  my (pl)
+moi            |  me
+mon            |  my (masc)
+ne             |  not
+nos            |  our (pl)
+notre          |  our
+nous           |  we
+on             |  one
+ou             |  or (où = where)
+par            |  by
+pas            |  not
+pour           |  for
+qu             |  que before vowel
+que            |  that
+qui            |  who
+sa             |  his, her (fem)
+se             |  oneself
+ses            |  his (pl)
+son            |  his, her (masc)
+sur            |  on
+ta             |  thy (fem)
+te             |  thee
+tes            |  thy (pl)
+toi            |  thee
+ton            |  thy (masc)
+tu             |  thou
+un             |  a
+une            |  a
+vos            |  your (pl)
+votre          |  your
+vous           |  you
+
+               |  single letter forms
+
+c              |  c'
+d              |  d'
+j              |  j'
+l              |  l'
+à              |  to, at
+m              |  m'
+n              |  n'
+s              |  s'
+t              |  t'
+y              |  there
+
+               | forms of être (not including the infinitive):
+été
+étée
+étées
+étés
+étant
+suis
+es
+est
+sommes
+êtes
+sont
+serai
+seras
+sera
+serons
+serez
+seront
+serais
+serait
+serions
+seriez
+seraient
+étais
+était
+étions
+étiez
+étaient
+fus
+fut
+fûmes
+fûtes
+furent
+sois
+soit
+soyons
+soyez
+soient
+fusse
+fusses
+fût
+fussions
+fussiez
+fussent
+
+               | forms of avoir (not including the infinitive):
+ayant
+eu
+eue
+eues
+eus
+ai
+as
+avons
+avez
+ont
+aurai
+auras
+aura
+aurons
+aurez
+auront
+aurais
+aurait
+aurions
+auriez
+auraient
+avais
+avait
+avions
+aviez
+avaient
+eut
+eûmes
+eûtes
+eurent
+aie
+aies
+ait
+ayons
+ayez
+aient
+eusse
+eusses
+eût
+eussions
+eussiez
+eussent
+
+               | Later additions (from Jean-Christophe Deschamps)
+ceci           |  this
+cela           |  that
+celà           |  that
+cet            |  this
+cette          |  this
+ici            |  here
+ils            |  they
+les            |  the (pl)
+leurs          |  their (pl)
+quel           |  which
+quels          |  which
+quelle         |  which
+quelles        |  which
+sans           |  without
+soi            |  oneself
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ga.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ga.txt
new file mode 100644
index 0000000..9ff88d7
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ga.txt
@@ -0,0 +1,110 @@
+
+a
+ach
+ag
+agus
+an
+aon
+ar
+arna
+as
+b'
+ba
+beirt
+bhúr
+caoga
+ceathair
+ceathrar
+chomh
+chtó
+chuig
+chun
+cois
+céad
+cúig
+cúigear
+d'
+daichead
+dar
+de
+deich
+deichniúr
+den
+dhá
+do
+don
+dtí
+dá
+dár
+dó
+faoi
+faoin
+faoina
+faoinár
+fara
+fiche
+gach
+gan
+go
+gur
+haon
+hocht
+i
+iad
+idir
+in
+ina
+ins
+inár
+is
+le
+leis
+lena
+lenár
+m'
+mar
+mo
+mé
+na
+nach
+naoi
+naonúr
+ná
+ní
+níor
+nó
+nócha
+ocht
+ochtar
+os
+roimh
+sa
+seacht
+seachtar
+seachtó
+seasca
+seisear
+siad
+sibh
+sinn
+sna
+sé
+sí
+tar
+thar
+thú
+triúr
+trí
+trína
+trínár
+tríocha
+tú
+um
+ár
+é
+éis
+í
+ó
+ón
+óna
+ónár
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_gl.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_gl.txt
new file mode 100644
index 0000000..d8760b1
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_gl.txt
@@ -0,0 +1,161 @@
+# Galician stopwords
+a
+aínda
+alí
+aquel
+aquela
+aquelas
+aqueles
+aquilo
+aquí
+ao
+aos
+as
+así
+á
+ben
+cando
+che
+co
+coa
+comigo
+con
+connosco
+contigo
+convosco
+coas
+cos
+cun
+cuns
+cunha
+cunhas
+da
+dalgunha
+dalgunhas
+dalgún
+dalgúns
+das
+de
+del
+dela
+delas
+deles
+desde
+deste
+do
+dos
+dun
+duns
+dunha
+dunhas
+e
+el
+ela
+elas
+eles
+en
+era
+eran
+esa
+esas
+ese
+eses
+esta
+estar
+estaba
+está
+están
+este
+estes
+estiven
+estou
+eu
+é
+facer
+foi
+foron
+fun
+había
+hai
+iso
+isto
+la
+las
+lle
+lles
+lo
+los
+mais
+me
+meu
+meus
+min
+miña
+miñas
+moi
+na
+nas
+neste
+nin
+no
+non
+nos
+nosa
+nosas
+noso
+nosos
+nós
+nun
+nunha
+nuns
+nunhas
+o
+os
+ou
+ó
+ós
+para
+pero
+pode
+pois
+pola
+polas
+polo
+polos
+por
+que
+se
+senón
+ser
+seu
+seus
+sexa
+sido
+sobre
+súa
+súas
+tamén
+tan
+te
+ten
+teñen
+teño
+ter
+teu
+teus
+ti
+tido
+tiña
+tiven
+túa
+túas
+un
+unha
+unhas
+uns
+vos
+vosa
+vosas
+voso
+vosos
+vós
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hi.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hi.txt
new file mode 100644
index 0000000..86286bb
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hi.txt
@@ -0,0 +1,235 @@
+# Also see http://www.opensource.org/licenses/bsd-license.html
+# See http://members.unine.ch/jacques.savoy/clef/index.html.
+# This file was created by Jacques Savoy and is distributed under the BSD license.
+# Note: by default this file also contains forms normalized by HindiNormalizer 
+# for spelling variation (see section below), such that it can be used whether or 
+# not you enable that feature. When adding additional entries to this list,
+# please add the normalized form as well. 
+अंदर
+अत
+अपना
+अपनी
+अपने
+अभी
+आदि
+आप
+इत्यादि
+इन 
+इनका
+इन्हीं
+इन्हें
+इन्हों
+इस
+इसका
+इसकी
+इसके
+इसमें
+इसी
+इसे
+उन
+उनका
+उनकी
+उनके
+उनको
+उन्हीं
+उन्हें
+उन्हों
+उस
+उसके
+उसी
+उसे
+एक
+एवं
+एस
+ऐसे
+और
+कई
+कर
+करता
+करते
+करना
+करने
+करें
+कहते
+कहा
+का
+काफ़ी
+कि
+कितना
+किन्हें
+किन्हों
+किया
+किर
+किस
+किसी
+किसे
+की
+कुछ
+कुल
+के
+को
+कोई
+कौन
+कौनसा
+गया
+घर
+जब
+जहाँ
+जा
+जितना
+जिन
+जिन्हें
+जिन्हों
+जिस
+जिसे
+जीधर
+जैसा
+जैसे
+जो
+तक
+तब
+तरह
+तिन
+तिन्हें
+तिन्हों
+तिस
+तिसे
+तो
+था
+थी
+थे
+दबारा
+दिया
+दुसरा
+दूसरे
+दो
+द्वारा
+न
+नहीं
+ना
+निहायत
+नीचे
+ने
+पर
+पर  
+पहले
+पूरा
+पे
+फिर
+बनी
+बही
+बहुत
+बाद
+बाला
+बिलकुल
+भी
+भीतर
+मगर
+मानो
+मे
+में
+यदि
+यह
+यहाँ
+यही
+या
+यिह 
+ये
+रखें
+रहा
+रहे
+ऱ्वासा
+लिए
+लिये
+लेकिन
+व
+वर्ग
+वह
+वह 
+वहाँ
+वहीं
+वाले
+वुह 
+वे
+वग़ैरह
+संग
+सकता
+सकते
+सबसे
+सभी
+साथ
+साबुत
+साभ
+सारा
+से
+सो
+ही
+हुआ
+हुई
+हुए
+है
+हैं
+हो
+होता
+होती
+होते
+होना
+होने
+# additional normalized forms of the above
+अपनि
+जेसे
+होति
+सभि
+तिंहों
+इंहों
+दवारा
+इसि
+किंहें
+थि
+उंहों
+ओर
+जिंहें
+वहिं
+अभि
+बनि
+हि
+उंहिं
+उंहें
+हें
+वगेरह
+एसे
+रवासा
+कोन
+निचे
+काफि
+उसि
+पुरा
+भितर
+हे
+बहि
+वहां
+कोइ
+यहां
+जिंहों
+तिंहें
+किसि
+कइ
+यहि
+इंहिं
+जिधर
+इंहें
+अदि
+इतयादि
+हुइ
+कोनसा
+इसकि
+दुसरे
+जहां
+अप
+किंहों
+उनकि
+भि
+वरग
+हुअ
+जेसा
+नहिं
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hu.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hu.txt
new file mode 100644
index 0000000..1a96f1d
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hu.txt
@@ -0,0 +1,209 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/hungarian/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+ 
+| Hungarian stop word list
+| prepared by Anna Tordai
+
+a
+ahogy
+ahol
+aki
+akik
+akkor
+alatt
+által
+általában
+amely
+amelyek
+amelyekben
+amelyeket
+amelyet
+amelynek
+ami
+amit
+amolyan
+amíg
+amikor
+át
+abban
+ahhoz
+annak
+arra
+arról
+az
+azok
+azon
+azt
+azzal
+azért
+aztán
+azután
+azonban
+bár
+be
+belül
+benne
+cikk
+cikkek
+cikkeket
+csak
+de
+e
+eddig
+egész
+egy
+egyes
+egyetlen
+egyéb
+egyik
+egyre
+ekkor
+el
+elég
+ellen
+elő
+először
+előtt
+első
+én
+éppen
+ebben
+ehhez
+emilyen
+ennek
+erre
+ez
+ezt
+ezek
+ezen
+ezzel
+ezért
+és
+fel
+felé
+hanem
+hiszen
+hogy
+hogyan
+igen
+így
+illetve
+ill.
+ill
+ilyen
+ilyenkor
+ison
+ismét
+itt
+jó
+jól
+jobban
+kell
+kellett
+keresztül
+keressünk
+ki
+kívül
+között
+közül
+legalább
+lehet
+lehetett
+legyen
+lenne
+lenni
+lesz
+lett
+maga
+magát
+majd
+majd
+már
+más
+másik
+meg
+még
+mellett
+mert
+mely
+melyek
+mi
+mit
+míg
+miért
+milyen
+mikor
+minden
+mindent
+mindenki
+mindig
+mint
+mintha
+mivel
+most
+nagy
+nagyobb
+nagyon
+ne
+néha
+nekem
+neki
+nem
+néhány
+nélkül
+nincs
+olyan
+ott
+össze
+ő
+ők
+őket
+pedig
+persze
+rá
+s
+saját
+sem
+semmi
+sok
+sokat
+sokkal
+számára
+szemben
+szerint
+szinte
+talán
+tehát
+teljes
+tovább
+továbbá
+több
+úgy
+ugyanis
+új
+újabb
+újra
+után
+utána
+utolsó
+vagy
+vagyis
+valaki
+valami
+valamint
+való
+vagyok
+van
+vannak
+volt
+voltam
+voltak
+voltunk
+vissza
+vele
+viszont
+volna
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hy.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hy.txt
new file mode 100644
index 0000000..60c1c50
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_hy.txt
@@ -0,0 +1,46 @@
+# example set of Armenian stopwords.
+այդ
+այլ
+այն
+այս
+դու
+դուք
+եմ
+են
+ենք
+ես
+եք
+է
+էի
+էին
+էինք
+էիր
+էիք
+էր
+ըստ
+թ
+ի
+ին
+իսկ
+իր
+կամ
+համար
+հետ
+հետո
+մենք
+մեջ
+մի
+ն
+նա
+նաև
+նրա
+նրանք
+որ
+որը
+որոնք
+որպես
+ու
+ում
+պիտի
+վրա
+և
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_id.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_id.txt
new file mode 100644
index 0000000..4617f83
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_id.txt
@@ -0,0 +1,359 @@
+# from appendix D of: A Study of Stemming Effects on Information
+# Retrieval in Bahasa Indonesia
+ada
+adanya
+adalah
+adapun
+agak
+agaknya
+agar
+akan
+akankah
+akhirnya
+aku
+akulah
+amat
+amatlah
+anda
+andalah
+antar
+diantaranya
+antara
+antaranya
+diantara
+apa
+apaan
+mengapa
+apabila
+apakah
+apalagi
+apatah
+atau
+ataukah
+ataupun
+bagai
+bagaikan
+sebagai
+sebagainya
+bagaimana
+bagaimanapun
+sebagaimana
+bagaimanakah
+bagi
+bahkan
+bahwa
+bahwasanya
+sebaliknya
+banyak
+sebanyak
+beberapa
+seberapa
+begini
+beginian
+beginikah
+beginilah
+sebegini
+begitu
+begitukah
+begitulah
+begitupun
+sebegitu
+belum
+belumlah
+sebelum
+sebelumnya
+sebenarnya
+berapa
+berapakah
+berapalah
+berapapun
+betulkah
+sebetulnya
+biasa
+biasanya
+bila
+bilakah
+bisa
+bisakah
+sebisanya
+boleh
+bolehkah
+bolehlah
+buat
+bukan
+bukankah
+bukanlah
+bukannya
+cuma
+percuma
+dahulu
+dalam
+dan
+dapat
+dari
+daripada
+dekat
+demi
+demikian
+demikianlah
+sedemikian
+dengan
+depan
+di
+dia
+dialah
+dini
+diri
+dirinya
+terdiri
+dong
+dulu
+enggak
+enggaknya
+entah
+entahlah
+terhadap
+terhadapnya
+hal
+hampir
+hanya
+hanyalah
+harus
+haruslah
+harusnya
+seharusnya
+hendak
+hendaklah
+hendaknya
+hingga
+sehingga
+ia
+ialah
+ibarat
+ingin
+inginkah
+inginkan
+ini
+inikah
+inilah
+itu
+itukah
+itulah
+jangan
+jangankan
+janganlah
+jika
+jikalau
+juga
+justru
+kala
+kalau
+kalaulah
+kalaupun
+kalian
+kami
+kamilah
+kamu
+kamulah
+kan
+kapan
+kapankah
+kapanpun
+dikarenakan
+karena
+karenanya
+ke
+kecil
+kemudian
+kenapa
+kepada
+kepadanya
+ketika
+seketika
+khususnya
+kini
+kinilah
+kiranya
+sekiranya
+kita
+kitalah
+kok
+lagi
+lagian
+selagi
+lah
+lain
+lainnya
+melainkan
+selaku
+lalu
+melalui
+terlalu
+lama
+lamanya
+selama
+selama
+selamanya
+lebih
+terlebih
+bermacam
+macam
+semacam
+maka
+makanya
+makin
+malah
+malahan
+mampu
+mampukah
+mana
+manakala
+manalagi
+masih
+masihkah
+semasih
+masing
+mau
+maupun
+semaunya
+memang
+mereka
+merekalah
+meski
+meskipun
+semula
+mungkin
+mungkinkah
+nah
+namun
+nanti
+nantinya
+nyaris
+oleh
+olehnya
+seorang
+seseorang
+pada
+padanya
+padahal
+paling
+sepanjang
+pantas
+sepantasnya
+sepantasnyalah
+para
+pasti
+pastilah
+per
+pernah
+pula
+pun
+merupakan
+rupanya
+serupa
+saat
+saatnya
+sesaat
+saja
+sajalah
+saling
+bersama
+sama
+sesama
+sambil
+sampai
+sana
+sangat
+sangatlah
+saya
+sayalah
+se
+sebab
+sebabnya
+sebuah
+tersebut
+tersebutlah
+sedang
+sedangkan
+sedikit
+sedikitnya
+segala
+segalanya
+segera
+sesegera
+sejak
+sejenak
+sekali
+sekalian
+sekalipun
+sesekali
+sekaligus
+sekarang
+sekarang
+sekitar
+sekitarnya
+sela
+selain
+selalu
+seluruh
+seluruhnya
+semakin
+sementara
+sempat
+semua
+semuanya
+sendiri
+sendirinya
+seolah
+seperti
+sepertinya
+sering
+seringnya
+serta
+siapa
+siapakah
+siapapun
+disini
+disinilah
+sini
+sinilah
+sesuatu
+sesuatunya
+suatu
+sesudah
+sesudahnya
+sudah
+sudahkah
+sudahlah
+supaya
+tadi
+tadinya
+tak
+tanpa
+setelah
+telah
+tentang
+tentu
+tentulah
+tentunya
+tertentu
+seterusnya
+tapi
+tetapi
+setiap
+tiap
+setidaknya
+tidak
+tidakkah
+tidaklah
+toh
+waduh
+wah
+wahai
+sewaktu
+walau
+walaupun
+wong
+yaitu
+yakni
+yang
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_it.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_it.txt
new file mode 100644
index 0000000..4cb5b08
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_it.txt
@@ -0,0 +1,301 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/italian/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | An Italian stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ad             |  a (to) before vowel
+al             |  a + il
+allo           |  a + lo
+ai             |  a + i
+agli           |  a + gli
+all            |  a + l'
+agl            |  a + gl'
+alla           |  a + la
+alle           |  a + le
+con            |  with
+col            |  con + il
+coi            |  con + i (forms collo, cogli etc are now very rare)
+da             |  from
+dal            |  da + il
+dallo          |  da + lo
+dai            |  da + i
+dagli          |  da + gli
+dall           |  da + l'
+dagl           |  da + gl'
+dalla          |  da + la
+dalle          |  da + le
+di             |  of
+del            |  di + il
+dello          |  di + lo
+dei            |  di + i
+degli          |  di + gli
+dell           |  di + l'
+degl           |  di + gl'
+della          |  di + la
+delle          |  di + le
+in             |  in
+nel            |  in + il
+nello          |  in + lo
+nei            |  in + i
+negli          |  in + gli
+nell           |  in + l'
+negl           |  in + gl'
+nella          |  in + la
+nelle          |  in + le
+su             |  on
+sul            |  su + il
+sullo          |  su + lo
+sui            |  su + i
+sugli          |  su + gli
+sull           |  su + l'
+sugl           |  su + gl'
+sulla          |  su + la
+sulle          |  su + le
+per            |  through, by
+tra            |  among
+contro         |  against
+io             |  I
+tu             |  thou
+lui            |  he
+lei            |  she
+noi            |  we
+voi            |  you
+loro           |  they
+mio            |  my
+mia            |
+miei           |
+mie            |
+tuo            |
+tua            |
+tuoi           |  thy
+tue            |
+suo            |
+sua            |
+suoi           |  his, her
+sue            |
+nostro         |  our
+nostra         |
+nostri         |
+nostre         |
+vostro         |  your
+vostra         |
+vostri         |
+vostre         |
+mi             |  me
+ti             |  thee
+ci             |  us, there
+vi             |  you, there
+lo             |  him, the
+la             |  her, the
+li             |  them
+le             |  them, the
+gli            |  to him, the
+ne             |  from there etc
+il             |  the
+un             |  a
+uno            |  a
+una            |  a
+ma             |  but
+ed             |  and
+se             |  if
+perché         |  why, because
+anche          |  also
+come           |  how
+dov            |  where (as dov')
+dove           |  where
+che            |  who, that
+chi            |  who
+cui            |  whom
+non            |  not
+più            |  more
+quale          |  who, that
+quanto         |  how much
+quanti         |
+quanta         |
+quante         |
+quello         |  that
+quelli         |
+quella         |
+quelle         |
+questo         |  this
+questi         |
+questa         |
+queste         |
+si             |  yes
+tutto          |  all
+tutti          |  all
+
+               |  single letter forms:
+
+a              |  at
+c              |  as c' for ce or ci
+e              |  and
+i              |  the
+l              |  as l'
+o              |  or
+
+               | forms of avere, to have (not including the infinitive):
+
+ho
+hai
+ha
+abbiamo
+avete
+hanno
+abbia
+abbiate
+abbiano
+avrò
+avrai
+avrà
+avremo
+avrete
+avranno
+avrei
+avresti
+avrebbe
+avremmo
+avreste
+avrebbero
+avevo
+avevi
+aveva
+avevamo
+avevate
+avevano
+ebbi
+avesti
+ebbe
+avemmo
+aveste
+ebbero
+avessi
+avesse
+avessimo
+avessero
+avendo
+avuto
+avuta
+avuti
+avute
+
+               | forms of essere, to be (not including the infinitive):
+sono
+sei
+è
+siamo
+siete
+sia
+siate
+siano
+sarò
+sarai
+sarà
+saremo
+sarete
+saranno
+sarei
+saresti
+sarebbe
+saremmo
+sareste
+sarebbero
+ero
+eri
+era
+eravamo
+eravate
+erano
+fui
+fosti
+fu
+fummo
+foste
+furono
+fossi
+fosse
+fossimo
+fossero
+essendo
+
+               | forms of fare, to do (not including the infinitive, fa, fat-):
+faccio
+fai
+facciamo
+fanno
+faccia
+facciate
+facciano
+farò
+farai
+farà
+faremo
+farete
+faranno
+farei
+faresti
+farebbe
+faremmo
+fareste
+farebbero
+facevo
+facevi
+faceva
+facevamo
+facevate
+facevano
+feci
+facesti
+fece
+facemmo
+faceste
+fecero
+facessi
+facesse
+facessimo
+facessero
+facendo
+
+               | forms of stare, to be (not including the infinitive):
+sto
+stai
+sta
+stiamo
+stanno
+stia
+stiate
+stiano
+starò
+starai
+starà
+staremo
+starete
+staranno
+starei
+staresti
+starebbe
+staremmo
+stareste
+starebbero
+stavo
+stavi
+stava
+stavamo
+stavate
+stavano
+stetti
+stesti
+stette
+stemmo
+steste
+stettero
+stessi
+stesse
+stessimo
+stessero
+stando
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ja.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ja.txt
new file mode 100644
index 0000000..d4321be
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ja.txt
@@ -0,0 +1,127 @@
+#
+# This file defines a stopword set for Japanese.
+#
+# This set is made up of hand-picked frequent terms from segmented Japanese Wikipedia.
+# Punctuation characters and frequent kanji have mostly been left out.  See LUCENE-3745
+# for frequency lists, etc. that can be useful for making your own set (if desired)
+#
+# Note that there is an overlap between these stopwords and the terms stopped when used
+# in combination with the JapanesePartOfSpeechStopFilter.  When editing this file, note
+# that comments are not allowed on the same line as stopwords.
+#
+# Also note that stopping is done in a case-insensitive manner.  Change your StopFilter
+# configuration if you need case-sensitive stopping.  Lastly, note that stopping is done
+# using the same character width as the entries in this file.  Since this StopFilter is
+# normally done after a CJKWidthFilter in your chain, you would usually want your romaji
+# entries to be in half-width and your kana entries to be in full-width.
+#
+の
+に
+は
+を
+た
+が
+で
+て
+と
+し
+れ
+さ
+ある
+いる
+も
+する
+から
+な
+こと
+として
+い
+や
+れる
+など
+なっ
+ない
+この
+ため
+その
+あっ
+よう
+また
+もの
+という
+あり
+まで
+られ
+なる
+へ
+か
+だ
+これ
+によって
+により
+おり
+より
+による
+ず
+なり
+られる
+において
+ば
+なかっ
+なく
+しかし
+について
+せ
+だっ
+その後
+できる
+それ
+う
+ので
+なお
+のみ
+でき
+き
+つ
+における
+および
+いう
+さらに
+でも
+ら
+たり
+その他
+に関する
+たち
+ます
+ん
+なら
+に対して
+特に
+せる
+及び
+これら
+とき
+では
+にて
+ほか
+ながら
+うち
+そして
+とともに
+ただし
+かつて
+それぞれ
+または
+お
+ほど
+ものの
+に対する
+ほとんど
+と共に
+といった
+です
+とも
+ところ
+ここ
+##### End of file
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_lv.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_lv.txt
new file mode 100644
index 0000000..e21a23c
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_lv.txt
@@ -0,0 +1,172 @@
+# Set of Latvian stopwords from A Stemming Algorithm for Latvian, Karlis Kreslins
+# the original list of over 800 forms was refined: 
+#   pronouns, adverbs, interjections were removed
+# 
+# prepositions
+aiz
+ap
+ar
+apakš
+ārpus
+augšpus
+bez
+caur
+dēļ
+gar
+iekš
+iz
+kopš
+labad
+lejpus
+līdz
+no
+otrpus
+pa
+par
+pār
+pēc
+pie
+pirms
+pret
+priekš
+starp
+šaipus
+uz
+viņpus
+virs
+virspus
+zem
+apakšpus
+# Conjunctions
+un
+bet
+jo
+ja
+ka
+lai
+tomēr
+tikko
+turpretī
+arī
+kaut
+gan
+tādēļ
+tā
+ne
+tikvien
+vien
+kā
+ir
+te
+vai
+kamēr
+# Particles
+ar
+diezin
+droši
+diemžēl
+nebūt
+ik
+it
+taču
+nu
+pat
+tiklab
+iekšpus
+nedz
+tik
+nevis
+turpretim
+jeb
+iekam
+iekām
+iekāms
+kolīdz
+līdzko
+tiklīdz
+jebšu
+tālab
+tāpēc
+nekā
+itin
+jā
+jau
+jel
+nē
+nezin
+tad
+tikai
+vis
+tak
+iekams
+vien
+# modal verbs
+būt  
+biju 
+biji
+bija
+bijām
+bijāt
+esmu
+esi
+esam
+esat 
+būšu     
+būsi
+būs
+būsim
+būsiet
+tikt
+tiku
+tiki
+tika
+tikām
+tikāt
+tieku
+tiec
+tiek
+tiekam
+tiekat
+tikšu
+tiks
+tiksim
+tiksiet
+tapt
+tapi
+tapāt
+topat
+tapšu
+tapsi
+taps
+tapsim
+tapsiet
+kļūt
+kļuvu
+kļuvi
+kļuva
+kļuvām
+kļuvāt
+kļūstu
+kļūsti
+kļūst
+kļūstam
+kļūstat
+kļūšu
+kļūsi
+kļūs
+kļūsim
+kļūsiet
+# verbs
+varēt
+varēju
+varējām
+varēšu
+varēsim
+var
+varēji
+varējāt
+varēsi
+varēsiet
+varat
+varēja
+varēs
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_nl.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_nl.txt
new file mode 100644
index 0000000..f4d61f5
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_nl.txt
@@ -0,0 +1,117 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/dutch/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Dutch stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ | This is a ranked list (commonest to rarest) of stopwords derived from
+ | a large sample of Dutch text.
+
+ | Dutch stop words frequently exhibit homonym clashes. These are indicated
+ | clearly below.
+
+de             |  the
+en             |  and
+van            |  of, from
+ik             |  I, the ego
+te             |  (1) chez, at etc, (2) to, (3) too
+dat            |  that, which
+die            |  that, those, who, which
+in             |  in, inside
+een            |  a, an, one
+hij            |  he
+het            |  the, it
+niet           |  not, nothing, naught
+zijn           |  (1) to be, being, (2) his, one's, its
+is             |  is
+was            |  (1) was, past tense of all persons sing. of 'zijn' (to be) (2) wax, (3) the washing, (4) rise of river
+op             |  on, upon, at, in, up, used up
+aan            |  on, upon, to (as dative)
+met            |  with, by
+als            |  like, such as, when
+voor           |  (1) before, in front of, (2) furrow
+had            |  had, past tense all persons sing. of 'hebben' (have)
+er             |  there
+maar           |  but, only
+om             |  round, about, for etc
+hem            |  him
+dan            |  then
+zou            |  should/would, past tense all persons sing. of 'zullen'
+of             |  or, whether, if
+wat            |  what, something, anything
+mijn           |  possessive and noun 'mine'
+men            |  people, 'one'
+dit            |  this
+zo             |  so, thus, in this way
+door           |  through by
+over           |  over, across
+ze             |  she, her, they, them
+zich           |  oneself
+bij            |  (1) a bee, (2) by, near, at
+ook            |  also, too
+tot            |  till, until
+je             |  you
+mij            |  me
+uit            |  out of, from
+der            |  Old Dutch form of 'van der' still found in surnames
+daar           |  (1) there, (2) because
+haar           |  (1) her, their, them, (2) hair
+naar           |  (1) unpleasant, unwell etc, (2) towards, (3) as
+heb            |  present first person sing. of 'to have'
+hoe            |  how, why
+heeft          |  present third person sing. of 'to have'
+hebben         |  'to have' and various parts thereof
+deze           |  this
+u              |  you
+want           |  (1) for, (2) mitten, (3) rigging
+nog            |  yet, still
+zal            |  'shall', first and third person sing. of verb 'zullen' (will)
+me             |  me
+zij            |  she, they
+nu             |  now
+ge             |  'thou', still used in Belgium and south Netherlands
+geen           |  none
+omdat          |  because
+iets           |  something, somewhat
+worden         |  to become, grow, get
+toch           |  yet, still
+al             |  all, every, each
+waren          |  (1) 'were', (2) to wander, (3) wares
+veel           |  much, many
+meer           |  (1) more, (2) lake
+doen           |  to do, to make
+toen           |  then, when
+moet           |  noun 'spot/mote' and present form of 'to must'
+ben            |  (1) am, (2) 'are' in interrogative second person singular of 'to be'
+zonder         |  without
+kan            |  noun 'can' and present form of 'to be able'
+hun            |  their, them
+dus            |  so, consequently
+alles          |  all, everything, anything
+onder          |  under, beneath
+ja             |  yes, of course
+eens           |  once, one day
+hier           |  here
+wie            |  who
+werd           |  imperfect third person sing. of 'become'
+altijd         |  always
+doch           |  yet, but etc
+wordt          |  present third person sing. of 'become'
+wezen          |  (1) to be, (2) 'been' as in 'been fishing', (3) orphans
+kunnen         |  to be able
+ons            |  us/our
+zelf           |  self
+tegen          |  against, towards, at
+na             |  after, near
+reeds          |  already
+wil            |  (1) present tense of 'want', (2) 'will', noun, (3) fender
+kon            |  could; past tense of 'to be able'
+niets          |  nothing
+uw             |  your
+iemand         |  somebody
+geweest        |  been; past participle of 'be'
+andere         |  other
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_no.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_no.txt
new file mode 100644
index 0000000..e76f36e
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_no.txt
@@ -0,0 +1,192 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/norwegian/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Norwegian stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ | This stop word list is for the dominant bokmål dialect. Words unique
+ | to nynorsk are marked *.
+
+ | Revised by Jan Bruusgaard <Jan.Bruusgaard@ssb.no>, Jan 2005
+
+og             | and
+i              | in
+jeg            | I
+det            | it/this/that
+at             | to (w. inf.)
+en             | a/an
+et             | a/an
+den            | it/this/that
+til            | to
+er             | is/am/are
+som            | who/that
+på             | on
+de             | they / you(formal)
+med            | with
+han            | he
+av             | of
+ikke           | not
+ikkje          | not *
+der            | there
+så             | so
+var            | was/were
+meg            | me
+seg            | oneself (reflexive)
+men            | but
+ett            | one
+har            | have
+om             | about
+vi             | we
+min            | my
+mitt           | my
+ha             | have
+hadde          | had
+hun            | she
+nå             | now
+over           | over
+da             | when/as
+ved            | by/know
+fra            | from
+du             | you
+ut             | out
+sin            | his/her/its (reflexive)
+dem            | them
+oss            | us
+opp            | up
+man            | you/one
+kan            | can
+hans           | his
+hvor           | where
+eller          | or
+hva            | what
+skal           | shall/must
+selv           | self (reflective)
+sjøl           | self (reflective)
+her            | here
+alle           | all
+vil            | will
+bli            | become
+ble            | became
+blei           | became *
+blitt          | have become
+kunne          | could
+inn            | in
+når            | when
+være           | be
+kom            | come
+noen           | some
+noe            | some
+ville          | would
+dere           | you
+som            | who/which/that
+deres          | their/theirs
+kun            | only/just
+ja             | yes
+etter          | after
+ned            | down
+skulle         | should
+denne          | this
+for            | for/because
+deg            | you
+si             | hers/his
+sine           | hers/his
+sitt           | hers/his
+mot            | against
+å              | to
+meget          | much
+hvorfor        | why
+dette          | this
+disse          | these/those
+uten           | without
+hvordan        | how
+ingen          | none
+din            | your
+ditt           | your
+blir           | become
+samme          | same
+hvilken        | which
+hvilke         | which (plural)
+sånn           | such a
+inni           | inside/within
+mellom         | between
+vår            | our
+hver           | each
+hvem           | who
+vors           | us/ours
+hvis           | whose
+både           | both
+bare           | only/just
+enn            | than
+fordi          | as/because
+før            | before
+mange          | many
+også           | also
+slik           | just
+vært           | been
+være           | to be
+båe            | both *
+begge          | both
+siden          | since
+dykk           | your *
+dykkar         | yours *
+dei            | they *
+deira          | them *
+deires         | theirs *
+deim           | them *
+di             | your (fem.) *
+då             | as/when *
+eg             | I *
+ein            | a/an *
+eit            | a/an *
+eitt           | a/an *
+elles          | or *
+honom          | he *
+hjå            | at *
+ho             | she *
+hoe            | she *
+henne          | her
+hennar         | her/hers
+hennes         | hers
+hoss           | how *
+hossen         | how *
+ikkje          | not *
+ingi           | noone *
+inkje          | noone *
+korleis        | how *
+korso          | how *
+kva            | what/which *
+kvar           | where *
+kvarhelst      | where *
+kven           | who/whom *
+kvi            | why *
+kvifor         | why *
+me             | we *
+medan          | while *
+mi             | my *
+mine           | my *
+mykje          | much *
+no             | now *
+nokon          | some (masc./neut.) *
+noka           | some (fem.) *
+nokor          | some *
+noko           | some *
+nokre          | some *
+si             | his/hers *
+sia            | since *
+sidan          | since *
+so             | so *
+somt           | some *
+somme          | some *
+um             | about*
+upp            | up *
+vere           | be *
+vore           | was *
+verte          | become *
+vort           | become *
+varte          | became *
+vart           | became *
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_pt.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_pt.txt
new file mode 100644
index 0000000..276c1b4
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_pt.txt
@@ -0,0 +1,251 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/portuguese/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Portuguese stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+
+ | The following is a ranked list (commonest to rarest) of stopwords
+ | deriving from a large sample of text.
+
+ | Extra words have been added at the end.
+
+de             |  of, from
+a              |  the; to, at; her
+o              |  the; him
+que            |  who, that
+e              |  and
+do             |  de + o
+da             |  de + a
+em             |  in
+um             |  a
+para           |  for
+  | é          from SER
+com            |  with
+não            |  not, no
+uma            |  a
+os             |  the; them
+no             |  em + o
+se             |  himself etc
+na             |  em + a
+por            |  for
+mais           |  more
+as             |  the; them
+dos            |  de + os
+como           |  as, like
+mas            |  but
+  | foi        from SER
+ao             |  a + o
+ele            |  he
+das            |  de + as
+  | tem        from TER
+à              |  a + a
+seu            |  his
+sua            |  her
+ou             |  or
+  | ser        from SER
+quando         |  when
+muito          |  much
+  | há         from HAV
+nos            |  em + os; us
+já             |  already, now
+  | está       from EST
+eu             |  I
+também         |  also
+só             |  only, just
+pelo           |  per + o
+pela           |  per + a
+até            |  up to
+isso           |  that
+ela            |  she
+entre          |  between
+  | era        from SER
+depois         |  after
+sem            |  without
+mesmo          |  same
+aos            |  a + os
+  | ter        from TER
+seus           |  his
+quem           |  whom
+nas            |  em + as
+me             |  me
+esse           |  that
+eles           |  they
+  | estão      from EST
+você           |  you
+  | tinha      from TER
+  | foram      from SER
+essa           |  that
+num            |  em + um
+nem            |  nor
+suas           |  her
+meu            |  my
+às             |  a + as
+minha          |  my
+  | têm        from TER
+numa           |  em + uma
+pelos          |  per + os
+elas           |  they
+  | havia      from HAV
+  | seja       from SER
+qual           |  which
+  | será       from SER
+nós            |  we
+  | tenho      from TER
+lhe            |  to him, her
+deles          |  of them
+essas          |  those
+esses          |  those
+pelas          |  per + as
+este           |  this
+  | fosse      from SER
+dele           |  of him
+
+ | other words. There are many contractions such as naquele = em+aquele,
+ | mo = me+o, but they are rare.
+ | Indefinite article plural forms are also rare.
+
+tu             |  thou
+te             |  thee
+vocês          |  you (plural)
+vos            |  you
+lhes           |  to them
+meus           |  my
+minhas
+teu            |  thy
+tua
+teus
+tuas
+nosso          | our
+nossa
+nossos
+nossas
+
+dela           |  of her
+delas          |  of them
+
+esta           |  this
+estes          |  these
+estas          |  these
+aquele         |  that
+aquela         |  that
+aqueles        |  those
+aquelas        |  those
+isto           |  this
+aquilo         |  that
+
+               | forms of estar, to be (not including the infinitive):
+estou
+está
+estamos
+estão
+estive
+esteve
+estivemos
+estiveram
+estava
+estávamos
+estavam
+estivera
+estivéramos
+esteja
+estejamos
+estejam
+estivesse
+estivéssemos
+estivessem
+estiver
+estivermos
+estiverem
+
+               | forms of haver, to have (not including the infinitive):
+hei
+há
+havemos
+hão
+houve
+houvemos
+houveram
+houvera
+houvéramos
+haja
+hajamos
+hajam
+houvesse
+houvéssemos
+houvessem
+houver
+houvermos
+houverem
+houverei
+houverá
+houveremos
+houverão
+houveria
+houveríamos
+houveriam
+
+               | forms of ser, to be (not including the infinitive):
+sou
+somos
+são
+era
+éramos
+eram
+fui
+foi
+fomos
+foram
+fora
+fôramos
+seja
+sejamos
+sejam
+fosse
+fôssemos
+fossem
+for
+formos
+forem
+serei
+será
+seremos
+serão
+seria
+seríamos
+seriam
+
+               | forms of ter, to have (not including the infinitive):
+tenho
+tem
+temos
+tém
+tinha
+tínhamos
+tinham
+tive
+teve
+tivemos
+tiveram
+tivera
+tivéramos
+tenha
+tenhamos
+tenham
+tivesse
+tivéssemos
+tivessem
+tiver
+tivermos
+tiverem
+terei
+terá
+teremos
+terão
+teria
+teríamos
+teriam
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ro.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ro.txt
new file mode 100644
index 0000000..4fdee90
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ro.txt
@@ -0,0 +1,233 @@
+# This file was created by Jacques Savoy and is distributed under the BSD license.
+# See http://members.unine.ch/jacques.savoy/clef/index.html.
+# Also see http://www.opensource.org/licenses/bsd-license.html
+acea
+aceasta
+această
+aceea
+acei
+aceia
+acel
+acela
+acele
+acelea
+acest
+acesta
+aceste
+acestea
+aceşti
+aceştia
+acolo
+acum
+ai
+aia
+aibă
+aici
+al
+ăla
+ale
+alea
+ălea
+altceva
+altcineva
+am
+ar
+are
+aş
+aşadar
+asemenea
+asta
+ăsta
+astăzi
+astea
+ăstea
+ăştia
+asupra
+aţi
+au
+avea
+avem
+aveţi
+azi
+bine
+bucur
+bună
+ca
+că
+căci
+când
+care
+cărei
+căror
+cărui
+cât
+câte
+câţi
+către
+câtva
+ce
+cel
+ceva
+chiar
+cînd
+cine
+cineva
+cît
+cîte
+cîţi
+cîtva
+contra
+cu
+cum
+cumva
+curând
+curînd
+da
+dă
+dacă
+dar
+datorită
+de
+deci
+deja
+deoarece
+departe
+deşi
+din
+dinaintea
+dintr
+dintre
+drept
+după
+ea
+ei
+el
+ele
+eram
+este
+eşti
+eu
+face
+fără
+fi
+fie
+fiecare
+fii
+fim
+fiţi
+iar
+ieri
+îi
+îl
+îmi
+împotriva
+în 
+înainte
+înaintea
+încât
+încît
+încotro
+între
+întrucât
+întrucît
+îţi
+la
+lângă
+le
+li
+lîngă
+lor
+lui
+mă
+mâine
+mea
+mei
+mele
+mereu
+meu
+mi
+mine
+mult
+multă
+mulţi
+ne
+nicăieri
+nici
+nimeni
+nişte
+noastră
+noastre
+noi
+noştri
+nostru
+nu
+ori
+oricând
+oricare
+oricât
+orice
+oricînd
+oricine
+oricît
+oricum
+oriunde
+până
+pe
+pentru
+peste
+pînă
+poate
+pot
+prea
+prima
+primul
+prin
+printr
+sa
+să
+săi
+sale
+sau
+său
+se
+şi
+sînt
+sîntem
+sînteţi
+spre
+sub
+sunt
+suntem
+sunteţi
+ta
+tăi
+tale
+tău
+te
+ţi
+ţie
+tine
+toată
+toate
+tot
+toţi
+totuşi
+tu
+un
+una
+unde
+undeva
+unei
+unele
+uneori
+unor
+vă
+vi
+voastră
+voastre
+voi
+voştri
+vostru
+vouă
+vreo
+vreun
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ru.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ru.txt
new file mode 100644
index 0000000..6430769
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_ru.txt
@@ -0,0 +1,241 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/russian/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | a russian stop word list. comments begin with vertical bar. each stop
+ | word is at the start of a line.
+
+ | this is a ranked list (commonest to rarest) of stopwords derived from
+ | a large text sample.
+
+ | letter `ё' is translated to `е'.
+
+и              | and
+в              | in/into
+во             | alternative form
+не             | not
+что            | what/that
+он             | he
+на             | on/onto
+я              | i
+с              | from
+со             | alternative form
+как            | how
+а              | milder form of `no' (but)
+то             | conjunction and form of `that'
+все            | all
+она            | she
+так            | so, thus
+его            | him
+но             | but
+да             | yes/and
+ты             | thou
+к              | towards, by
+у              | around, chez
+же             | intensifier particle
+вы             | you
+за             | beyond, behind
+бы             | conditional/subj. particle
+по             | up to, along
+только         | only
+ее             | her
+мне            | to me
+было           | it was
+вот            | here is/are, particle
+от             | away from
+меня           | me
+еще            | still, yet, more
+нет            | no, there isn't/aren't
+о              | about
+из             | out of
+ему            | to him
+теперь         | now
+когда          | when
+даже           | even
+ну             | so, well
+вдруг          | suddenly
+ли             | interrogative particle
+если           | if
+уже            | already, but homonym of `narrower'
+или            | or
+ни             | neither
+быть           | to be
+был            | he was
+него           | prepositional form of его
+до             | up to
+вас            | you accusative
+нибудь         | indef. suffix preceded by hyphen
+опять          | again
+уж             | already, but homonym of `adder'
+вам            | to you
+сказал         | he said
+ведь           | particle `after all'
+там            | there
+потом          | then
+себя           | oneself
+ничего         | nothing
+ей             | to her
+может          | usually with `быть' as `maybe'
+они            | they
+тут            | here
+где            | where
+есть           | there is/are
+надо           | got to, must
+ней            | prepositional form of  ей
+для            | for
+мы             | we
+тебя           | thee
+их             | them, their
+чем            | than
+была           | she was
+сам            | self
+чтоб           | in order to
+без            | without
+будто          | as if
+человек        | man, person, one
+чего           | genitive form of `what'
+раз            | once
+тоже           | also
+себе           | to oneself
+под            | beneath
+жизнь          | life
+будет          | will be
+ж              | short form of intensifier particle `же'
+тогда          | then
+кто            | who
+этот           | this
+говорил        | was saying
+того           | genitive form of `that'
+потому         | for that reason
+этого          | genitive form of `this'
+какой          | which
+совсем         | altogether
+ним            | prepositional form of `его', `они'
+здесь          | here
+этом           | prepositional form of `этот'
+один           | one
+почти          | almost
+мой            | my
+тем            | instrumental/dative plural of `тот', `то'
+чтобы          | full form of `in order that'
+нее            | her (acc.)
+кажется        | it seems
+сейчас         | now
+были           | they were
+куда           | where to
+зачем          | why
+сказать        | to say
+всех           | all (acc., gen. preposn. plural)
+никогда        | never
+сегодня        | today
+можно          | possible, one can
+при            | by
+наконец        | finally
+два            | two
+об             | alternative form of `о', about
+другой         | another
+хоть           | even
+после          | after
+над            | above
+больше         | more
+тот            | that one (masc.)
+через          | across, in
+эти            | these
+нас            | us
+про            | about
+всего          | in all, only, of all
+них            | prepositional form of `они' (they)
+какая          | which, feminine
+много          | lots
+разве          | interrogative particle
+сказала        | she said
+три            | three
+эту            | this, acc. fem. sing.
+моя            | my, feminine
+впрочем        | moreover, besides
+хорошо         | good
+свою           | ones own, acc. fem. sing.
+этой           | oblique form of `эта', fem. `this'
+перед          | in front of
+иногда         | sometimes
+лучше          | better
+чуть           | a little
+том            | preposn. form of `that one'
+нельзя         | one must not
+такой          | such a one
+им             | to them
+более          | more
+всегда         | always
+конечно        | of course
+всю            | acc. fem. sing of `all'
+между          | between
+
+
+  | b: some paradigms
+  |
+  | personal pronouns
+  |
+  | я  меня  мне  мной  [мною]
+  | ты  тебя  тебе  тобой  [тобою]
+  | он  его  ему  им  [него, нему, ним]
+  | она  ее  ей  ею  [нее, ней, нею]
+  | оно  его  ему  им  [него, нему, ним]
+  |
+  | мы  нас  нам  нами
+  | вы  вас  вам  вами
+  | они  их  им  ими  [них, ним, ними]
+  |
+  |   себя  себе  собой   [собою]
+  |
+  | demonstrative pronouns: этот (this), тот (that)
+  |
+  | этот  эта  это  эти
+  | этого  эту  это  эти
+  | этого  этой  этого  этих
+  | этому  этой  этому  этим
+  | этим  этой  этим  [этою]  этими
+  | этом  этой  этом  этих
+  |
+  | тот  та  то  те
+  | того  ту  то  те
+  | того  той  того  тех
+  | тому  той  тому  тем
+  | тем  той  тем  [тою]  теми
+  | том  той  том  тех
+  |
+  | determinative pronouns
+  |
+  | (a) весь (all)
+  |
+  | весь  вся  все  все
+  | всего  всю  все  все
+  | всего  всей  всего  всех
+  | всему  всей  всему  всем
+  | всем  всей  всем  [всею]  всеми
+  | всем  всей  всем  всех
+  |
+  | (b) сам (himself etc)
+  |
+  | сам  сама  само  сами
+  | самого саму  само  самих
+  | самого самой самого  самих
+  | самому самой самому  самим
+  | самим  самой  самим  [самою]  самими
+  | самом самой самом  самих
+  |
+  | stems of verbs `to be', `to have', `to do' and modal
+  |
+  | быть  бы  буд  быв  есть  суть
+  | име
+  | дел
+  | мог   мож  мочь
+  | уме
+  | хоч  хот
+  | долж
+  | можн
+  | нужн
+  | нельзя
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_sv.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_sv.txt
new file mode 100644
index 0000000..22bddfd
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_sv.txt
@@ -0,0 +1,131 @@
+ | From svn.tartarus.org/snowball/trunk/website/algorithms/swedish/stop.txt
+ | This file is distributed under the BSD License.
+ | See http://snowball.tartarus.org/license.php
+ | Also see http://www.opensource.org/licenses/bsd-license.html
+ |  - Encoding was converted to UTF-8.
+ |  - This notice was added.
+
+ | A Swedish stop word list. Comments begin with vertical bar. Each stop
+ | word is at the start of a line.
+
+ | This is a ranked list (commonest to rarest) of stopwords derived from
+ | a large text sample.
+
+ | Swedish stop words occasionally exhibit homonym clashes. For example
+ |  så = so, but also seed. These are indicated clearly below.
+
+och            | and
+det            | it, this/that
+att            | to (with infinitive)
+i              | in, at
+en             | a
+jag            | I
+hon            | she
+som            | who, that
+han            | he
+på             | on
+den            | it, this/that
+med            | with
+var            | where, each
+sig            | him(self) etc
+för            | for
+så             | so (also: seed)
+till           | to
+är             | is
+men            | but
+ett            | a
+om             | if; around, about
+hade           | had
+de             | they, these/those
+av             | of
+icke           | not, no
+mig            | me
+du             | you
+henne          | her
+då             | then, when
+sin            | his
+nu             | now
+har            | have
+inte           | inte någon = no one
+hans           | his
+honom          | him
+skulle         | 'sake'
+hennes         | her
+där            | there
+min            | my
+man            | one (pronoun)
+ej             | nor
+vid            | at, by, on (also: vast)
+kunde          | could
+något          | some etc
+från           | from, off
+ut             | out
+när            | when
+efter          | after, behind
+upp            | up
+vi             | we
+dem            | them
+vara           | be
+vad            | what
+över           | over
+än             | than
+dig            | you
+kan            | can
+sina           | his
+här            | here
+ha             | have
+mot            | towards
+alla           | all
+under          | under (also: wonder)
+någon          | some etc
+eller          | or (else)
+allt           | all
+mycket         | much
+sedan          | since
+ju             | why
+denna          | this/that
+själv          | myself, yourself etc
+detta          | this/that
+åt             | to
+utan           | without
+varit          | was
+hur            | how
+ingen          | no
+mitt           | my
+ni             | you
+bli            | to be, become
+blev           | from bli
+oss            | us
+din            | thy
+dessa          | these/those
+några          | some etc
+deras          | their
+blir           | from bli
+mina           | my
+samma          | (the) same
+vilken         | who, that
+er             | you, your
+sådan          | such a
+vår            | our
+blivit         | from bli
+dess           | its
+inom           | within
+mellan         | between
+sådant         | such a
+varför         | why
+varje          | each
+vilka          | who, that
+ditt           | thy
+vem            | who
+vilket         | who, that
+sitta          | his
+sådana         | such a
+vart           | each
+dina           | thy
+vars           | whose
+vårt           | our
+våra           | our
+ert            | your
+era            | your
+vilkas         | whose
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_th.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_th.txt
new file mode 100644
index 0000000..07f0fab
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_th.txt
@@ -0,0 +1,119 @@
+# Thai stopwords from:
+# "Opinion Detection in Thai Political News Columns
+# Based on Subjectivity Analysis"
+# Khampol Sukhum, Supot Nitsuwat, and Choochart Haruechaiyasak
+ไว้
+ไม่
+ไป
+ได้
+ให้
+ใน
+โดย
+แห่ง
+แล้ว
+และ
+แรก
+แบบ
+แต่
+เอง
+เห็น
+เลย
+เริ่ม
+เรา
+เมื่อ
+เพื่อ
+เพราะ
+เป็นการ
+เป็น
+เปิดเผย
+เปิด
+เนื่องจาก
+เดียวกัน
+เดียว
+เช่น
+เฉพาะ
+เคย
+เข้า
+เขา
+อีก
+อาจ
+อะไร
+ออก
+อย่าง
+อยู่
+อยาก
+หาก
+หลาย
+หลังจาก
+หลัง
+หรือ
+หนึ่ง
+ส่วน
+ส่ง
+สุด
+สําหรับ
+ว่า
+วัน
+ลง
+ร่วม
+ราย
+รับ
+ระหว่าง
+รวม
+ยัง
+มี
+มาก
+มา
+พร้อม
+พบ
+ผ่าน
+ผล
+บาง
+น่า
+นี้
+นํา
+นั้น
+นัก
+นอกจาก
+ทุก
+ที่สุด
+ที่
+ทําให้
+ทํา
+ทาง
+ทั้งนี้
+ทั้ง
+ถ้า
+ถูก
+ถึง
+ต้อง
+ต่างๆ
+ต่าง
+ต่อ
+ตาม
+ตั้งแต่
+ตั้ง
+ด้าน
+ด้วย
+ดัง
+ซึ่ง
+ช่วง
+จึง
+จาก
+จัด
+จะ
+คือ
+ความ
+ครั้ง
+คง
+ขึ้น
+ของ
+ขอ
+ขณะ
+ก่อน
+ก็
+การ
+กับ
+กัน
+กว่า
+กล่าว
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_tr.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_tr.txt
new file mode 100644
index 0000000..84d9408
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/stopwords_tr.txt
@@ -0,0 +1,212 @@
+# Turkish stopwords from LUCENE-559
+# merged with the list from "Information Retrieval on Turkish Texts"
+#   (http://www.users.muohio.edu/canf/papers/JASIST2008offPrint.pdf)
+acaba
+altmış
+altı
+ama
+ancak
+arada
+aslında
+ayrıca
+bana
+bazı
+belki
+ben
+benden
+beni
+benim
+beri
+beş
+bile
+bin
+bir
+birçok
+biri
+birkaç
+birkez
+birşey
+birşeyi
+biz
+bize
+bizden
+bizi
+bizim
+böyle
+böylece
+bu
+buna
+bunda
+bundan
+bunlar
+bunları
+bunların
+bunu
+bunun
+burada
+çok
+çünkü
+da
+daha
+dahi
+de
+defa
+değil
+diğer
+diye
+doksan
+dokuz
+dolayı
+dolayısıyla
+dört
+edecek
+eden
+ederek
+edilecek
+ediliyor
+edilmesi
+ediyor
+eğer
+elli
+en
+etmesi
+etti
+ettiği
+ettiğini
+gibi
+göre
+halen
+hangi
+hatta
+hem
+henüz
+hep
+hepsi
+her
+herhangi
+herkesin
+hiç
+hiçbir
+için
+iki
+ile
+ilgili
+ise
+işte
+itibaren
+itibariyle
+kadar
+karşın
+katrilyon
+kendi
+kendilerine
+kendini
+kendisi
+kendisine
+kendisini
+kez
+ki
+kim
+kimden
+kime
+kimi
+kimse
+kırk
+milyar
+milyon
+mu
+mü
+mı
+nasıl
+ne
+neden
+nedenle
+nerde
+nerede
+nereye
+niye
+niçin
+o
+olan
+olarak
+oldu
+olduğu
+olduğunu
+olduklarını
+olmadı
+olmadığı
+olmak
+olması
+olmayan
+olmaz
+olsa
+olsun
+olup
+olur
+olursa
+oluyor
+on
+ona
+ondan
+onlar
+onlardan
+onları
+onların
+onu
+onun
+otuz
+oysa
+öyle
+pek
+rağmen
+sadece
+sanki
+sekiz
+seksen
+sen
+senden
+seni
+senin
+siz
+sizden
+sizi
+sizin
+şey
+şeyden
+şeyi
+şeyler
+şöyle
+şu
+şuna
+şunda
+şundan
+şunları
+şunu
+tarafından
+trilyon
+tüm
+üç
+üzere
+var
+vardı
+ve
+veya
+ya
+yani
+yapacak
+yapılan
+yapılması
+yapıyor
+yapmak
+yaptı
+yaptığı
+yaptığını
+yaptıkları
+yedi
+yerine
+yetmiş
+yine
+yirmi
+yoksa
+yüz
+zaten
diff --git a/solr/example/example-schemaless/solr/collection1/conf/lang/userdict_ja.txt b/solr/example/example-schemaless/solr/collection1/conf/lang/userdict_ja.txt
new file mode 100644
index 0000000..6f0368e
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/lang/userdict_ja.txt
@@ -0,0 +1,29 @@
+#
+# This is a sample user dictionary for Kuromoji (JapaneseTokenizer)
+#
+# Add entries to this file in order to override the statistical model in terms
+# of segmentation, readings and part-of-speech tags.  Notice that entries do
+# not have weights since they are always used when found.  This is by-design
+# in order to maximize ease-of-use.
+#
+# Entries are defined using the following CSV format:
+#  <text>,<token 1> ... <token n>,<reading 1> ... <reading n>,<part-of-speech tag>
+#
+# Notice that a single half-width space separates tokens and readings, and
+# that the number of tokens and readings must match exactly.
+#
+# Also notice that the behavior of multiple entries with the same <text> is undefined.
+#
+# Whitespace only lines are ignored.  Comments are not allowed on entry lines.
+#
+
+# Custom segmentation for kanji compounds
+日本経済新聞,日本 経済 新聞,ニホン ケイザイ シンブン,カスタム名詞
+関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,カスタム名詞
+
+# Custom segmentation for compound katakana
+トートバッグ,トート バッグ,トート バッグ,かずカナ名詞
+ショルダーバッグ,ショルダー バッグ,ショルダー バッグ,かずカナ名詞
+
+# Custom reading for former sumo wrestler
+朝青龍,朝青龍,アサショウリュウ,カスタム人名
diff --git a/solr/example/example-schemaless/solr/collection1/conf/protwords.txt b/solr/example/example-schemaless/solr/collection1/conf/protwords.txt
new file mode 100644
index 0000000..1dfc0ab
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/protwords.txt
@@ -0,0 +1,21 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+# Use a protected word file to protect against the stemmer reducing two
+# unrelated words to the same base word.
+
+# Some non-words that normally won't be encountered,
+# just to test that they won't be stemmed.
+dontstems
+zwhacky
+
diff --git a/solr/example/example-schemaless/solr/collection1/conf/schema.xml b/solr/example/example-schemaless/solr/collection1/conf/schema.xml
new file mode 100755
index 0000000..a157715
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/schema.xml
@@ -0,0 +1,1072 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--  
+ This is the Solr schema file. This file should be named "schema.xml" and
+ should be in the conf directory under the solr home
+ (i.e. ./solr/conf/schema.xml by default) 
+ or located where the classloader for the Solr webapp can find it.
+
+ This example schema is the recommended starting point for users.
+ It should be kept correct and concise, usable out-of-the-box.
+
+ For more information on how to customize this file, please see
+ http://wiki.apache.org/solr/SchemaXml
+
+ PERFORMANCE NOTE: this schema includes many optional features and should not
+ be used for benchmarking.  To improve performance one could
+  - set stored="false" for all fields possible (esp large fields) when you
+    only need to search on the field but don't need to return the original
+    value.
+  - set indexed="false" if you don't need to search on the field, but only
+    return the field as a result of searching on other indexed fields.
+  - remove all unneeded copyField statements
+  - for best index size and searching performance, set indexed="false"
+    for all general text fields, use copyField to copy them to the
+    catchall "text" field, and use that for searching.
+  - For maximum indexing performance, use the StreamingUpdateSolrServer
+    java client.
+  - Remember to run the JVM in server mode, and use a higher logging level
+    that avoids logging every request
+-->
+
+<schema name="example-schemaless" version="1.5">
+  <!-- attribute "name" is the name of this schema and is only used for display purposes.
+       version="x.y" is Solr's version number for the schema syntax and 
+       semantics.  It should not normally be changed by applications.
+
+       1.0: multiValued attribute did not exist, all fields are multiValued 
+            by nature
+       1.1: multiValued attribute introduced, false by default 
+       1.2: omitTermFreqAndPositions attribute introduced, true by default 
+            except for text fields.
+       1.3: removed optional field compress feature
+       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
+            behavior when a single string produces multiple tokens.  Defaults 
+            to off for version >= 1.4
+       1.5: omitNorms defaults to true for primitive field types 
+            (int, float, boolean, string...)
+     -->
+
+  <fields>
+    <!-- Valid attributes for fields:
+     name: mandatory - the name for the field
+     type: mandatory - the name of a field type from the 
+       <types> fieldType section
+     indexed: true if this field should be indexed (searchable or sortable)
+     stored: true if this field should be retrievable
+     docValues: true if this field should have doc values. Doc values are
+       useful for faceting, grouping, sorting and function queries. Although not
+       required, doc values will make the index faster to load, more
+       NRT-friendly and more memory-efficient. They however come with some
+       limitations: they are currently only supported by StrField, UUIDField
+       and all Trie*Fields, and depending on the field type, they might
+       require the field to be single-valued, be required or have a default
+       value (check the documentation of the field type you're interested in
+       for more information)
+     multiValued: true if this field may contain multiple values per document
+     omitNorms: (expert) set to true to omit the norms associated with
+       this field (this disables length normalization and index-time
+       boosting for the field, and saves some memory).  Only full-text
+       fields or fields that need an index-time boost need norms.
+       Norms are omitted for primitive (non-analyzed) types by default.
+     termVectors: [false] set to true to store the term vector for a
+       given field.
+       When using MoreLikeThis, fields used for similarity should be
+       stored for best performance.
+     termPositions: Store position information with the term vector.  
+       This will increase storage costs.
+     termOffsets: Store offset information with the term vector. This 
+       will increase storage costs.
+     required: The field is required.  It will throw an error if the
+       value does not exist
+     default: a value that should be used if no value is specified
+       when adding a document.
+   -->
+
+    <!-- field names should consist of alphanumeric or underscore characters only and
+      not start with a digit.  This is not currently strictly enforced,
+      but other field names will not have first class support from all components
+      and back compatibility is not guaranteed.  Names with both leading and
+      trailing underscores (e.g. _version_) are reserved.
+   -->
+
+    <!-- In this "schemaless" example, only two fields are pre-declared: id and _version_.
+         All other fields will be type guessed and added via the
+         "add-unknown-fields-to-the-schema" update request processor chain declared 
+         in solrconfig.xml.
+      -->
+    <field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+    <field name="_version_" type="long" indexed="true" stored="true"/>
+
+
+    <!-- Dynamic field definitions allow using convention over configuration
+       for fields via the specification of patterns to match field names. 
+       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
+       RESTRICTION: the glob-like pattern in the name attribute must have
+       a "*" only at the start or the end.  -->
+   
+    <dynamicField name="*_i"  type="int"    indexed="true"  stored="true"/>
+    <dynamicField name="*_is" type="int"    indexed="true"  stored="true"  multiValued="true"/>
+    <dynamicField name="*_s"  type="string"  indexed="true"  stored="true" />
+    <dynamicField name="*_ss" type="string"  indexed="true"  stored="true" multiValued="true"/>
+    <dynamicField name="*_l"  type="long"   indexed="true"  stored="true"/>
+    <dynamicField name="*_ls" type="long"   indexed="true"  stored="true"  multiValued="true"/>
+    <dynamicField name="*_t"  type="text_general"    indexed="true"  stored="true"/>
+    <dynamicField name="*_txt" type="text_general"   indexed="true"  stored="true" multiValued="true"/>
+    <dynamicField name="*_en"  type="text_en"    indexed="true"  stored="true" multiValued="true"/>
+    <dynamicField name="*_b"  type="boolean" indexed="true" stored="true"/>
+    <dynamicField name="*_bs" type="boolean" indexed="true" stored="true"  multiValued="true"/>
+    <dynamicField name="*_f"  type="float"  indexed="true"  stored="true"/>
+    <dynamicField name="*_fs" type="float"  indexed="true"  stored="true"  multiValued="true"/>
+    <dynamicField name="*_d"  type="double" indexed="true"  stored="true"/>
+    <dynamicField name="*_ds" type="double" indexed="true"  stored="true"  multiValued="true"/>
+
+    <!-- Type used to index the lat and lon components for the "location" FieldType -->
+    <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  stored="false" />
+
+    <dynamicField name="*_dt"  type="date"    indexed="true"  stored="true"/>
+    <dynamicField name="*_dts" type="date"    indexed="true"  stored="true" multiValued="true"/>
+    <dynamicField name="*_p"  type="location" indexed="true" stored="true"/>
+
+    <!-- some trie-coded dynamic fields for faster range queries -->
+    <dynamicField name="*_ti" type="tint"    indexed="true"  stored="true"/>
+    <dynamicField name="*_tl" type="tlong"   indexed="true"  stored="true"/>
+    <dynamicField name="*_tf" type="tfloat"  indexed="true"  stored="true"/>
+    <dynamicField name="*_td" type="tdouble" indexed="true"  stored="true"/>
+    <dynamicField name="*_tdt" type="tdate"  indexed="true"  stored="true"/>
+
+    <dynamicField name="*_pi"  type="pint"    indexed="true"  stored="true"/>
+    <dynamicField name="*_c"   type="currency" indexed="true"  stored="true"/>
+
+    <dynamicField name="ignored_*" type="ignored" multiValued="true"/>
+    <dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true"/>
+
+    <dynamicField name="random_*" type="random" />
+
+    <!-- uncomment the following to ignore any fields that don't already match an existing 
+        field name or dynamic field, rather than reporting them as an error. 
+        alternately, change the type="ignored" to some other type e.g. "text" if you want 
+        unknown fields indexed and/or stored by default 
+        
+        NB: use of "*" dynamic fields will disable field type guessing and adding
+        unknown fields to the schema. --> 
+    <!--dynamicField name="*" type="ignored" multiValued="true" /-->
+   
+  </fields>
+
+
+  <!-- Field to use to determine and enforce document uniqueness. 
+      Unless this field is marked with required="false", it will be a required field
+   -->
+  <uniqueKey>id</uniqueKey>
+
+  <!-- DEPRECATED: The defaultSearchField is consulted by various query parsers when
+  parsing a query string that isn't explicit about the field.  Machine (non-user)
+  generated queries are best made explicit, or they can use the "df" request parameter
+  which takes precedence over this.
+  Note: Un-commenting defaultSearchField will be insufficient if your request handler
+  in solrconfig.xml defines "df", which takes precedence. That would need to be removed.
+ <defaultSearchField>text</defaultSearchField> -->
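+  <!-- Illustrative request (not part of this config): a query like
+         /select?q=ipod&df=text
+       names the default field explicitly via "df", so the deprecated
+       defaultSearchField above is never consulted. -->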
+
+  <!-- DEPRECATED: The defaultOperator (AND|OR) is consulted by various query parsers
+  when parsing a query string to determine if a clause of the query should be marked as
+  required or optional, assuming the clause isn't already marked by some operator.
+  The default is OR, which is generally assumed so it is not a good idea to change it
+  globally here.  The "q.op" request parameter takes precedence over this.
+ <solrQueryParser defaultOperator="OR"/> -->
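+  <!-- Likewise illustrative: appending q.op to a request, e.g.
+         /select?q=ipod+power&q.op=AND
+       requires both terms to match without changing the global default. -->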
+
+  <!-- copyField commands copy one field to another at the time a document
+        is added to the index.  It's used either to index the same field differently,
+        or to add multiple fields to the same field for easier/faster searching.
+
+   <copyField source="cat" dest="text"/>
+   <copyField source="name" dest="text"/>
+   <copyField source="manu" dest="text"/>
+   <copyField source="features" dest="text"/>
+   <copyField source="includes" dest="text"/>
+   <copyField source="manu" dest="manu_exact"/>
+   -->
+
+  <!-- Copy the price into a currency enabled field (default USD)
+   <copyField source="price" dest="price_c"/>
+   -->
+
+  <!-- Text fields from SolrCell to search by default in our catch-all field
+   <copyField source="title" dest="text"/>
+   <copyField source="author" dest="text"/>
+   <copyField source="description" dest="text"/>
+   <copyField source="keywords" dest="text"/>
+   <copyField source="content" dest="text"/>
+   <copyField source="content_type" dest="text"/>
+   <copyField source="resourcename" dest="text"/>
+   <copyField source="url" dest="text"/>
+   -->
+
+  <!-- Create a string version of author for faceting
+   <copyField source="author" dest="author_s"/>
+   -->
+	
+  <!-- Above, multiple source fields are copied to the [text] field. 
+	  Another way to map multiple source fields to the same 
+	  destination field is to use the dynamic field syntax. 
+	  copyField also supports a maxChars to copy setting.  -->
+	   
+  <!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
+
+  <!-- copy name to alphaNameSort, a field designed for sorting by name -->
+  <!-- <copyField source="name" dest="alphaNameSort"/> -->
+ 
+  <types>
+    <!-- field type definitions. The "name" attribute is
+       just a label to be used by field definitions.  The "class"
+       attribute and any other attributes determine the real
+       behavior of the fieldType.
+         Class names starting with "solr" refer to java classes in a
+       standard package such as org.apache.solr.analysis
+    -->
+
+    <!-- The StrField type is not analyzed, but indexed/stored verbatim.
+       It supports doc values but in that case the field needs to be
+       single-valued and either required or have a default value.
+      -->
+    <fieldType name="string" class="solr.StrField" sortMissingLast="true" />
+
+    <!-- boolean type: "true" or "false" -->
+    <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+
+    <fieldType name="booleans" class="solr.BoolField" sortMissingLast="true" multiValued="true"/>
+
+    <!-- sortMissingLast and sortMissingFirst are optional attributes that are
+         currently supported on types that are sorted internally as strings
+         and on numeric types.
+	     This includes "string","boolean", and, as of 3.5 (and 4.x),
+	     int, float, long, date, double, including the "Trie" variants.
+       - If sortMissingLast="true", then a sort on this field will cause documents
+         without the field to come after documents with the field,
+         regardless of the requested sort order (asc or desc).
+       - If sortMissingFirst="true", then a sort on this field will cause documents
+         without the field to come before documents with the field,
+         regardless of the requested sort order.
+       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
+         then default lucene sorting will be used which places docs without the
+         field first in an ascending sort and last in a descending sort.
+    -->    
+
+    <!--
+      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
+
+      These fields support doc values, but they require the field to be
+      single-valued and either be required or have a default value.
+    -->
+    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
+
+    <!--
+     Numeric field types that index each value at various levels of precision
+     to accelerate range queries when the number of values between the range
+     endpoints is large. See the javadoc for NumericRangeQuery for internal
+     implementation details.
+
+     Smaller precisionStep values (specified in bits) will lead to more tokens
+     indexed per value, slightly larger index size, and faster range queries.
+     A precisionStep of 0 disables indexing at different precision levels.
+    -->
+    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+    
+    <fieldType name="tints" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0" multiValued="true"/>
+    <fieldType name="tfloats" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0" multiValued="true"/>
+    <fieldType name="tlongs" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0" multiValued="true"/>
+    <fieldType name="tdoubles" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0" multiValued="true"/>
+
+    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
+         is a more restricted form of the canonical representation of dateTime
+         http://www.w3.org/TR/xmlschema-2/#dateTime    
+         The trailing "Z" designates UTC time and is mandatory.
+         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
+         All other components are mandatory.
+
+         Expressions can also be used to denote calculations that should be
+         performed relative to "NOW" to determine the value, ie...
+
+               NOW/HOUR
+                  ... Round to the start of the current hour
+               NOW-1DAY
+                  ... Exactly 1 day prior to now
+               NOW/DAY+6MONTHS+3DAYS
+                  ... 6 months and 3 days in the future from the start of
+                      the current day
+                      
+         Consult the DateField javadocs for more information.
+
+         Note: For faster range queries, consider the tdate type
+      -->
+    <fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/>
+
+    <!-- A Trie based date field for faster date range queries and date faceting. -->
+    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
+
+    <fieldType name="tdates" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0" multiValued="true"/>
+
+
+    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
+    <fieldtype name="binary" class="solr.BinaryField"/>
+
+    <!--
+      Note:
+      These should only be used for compatibility with existing indexes (created with lucene or older Solr versions).
+      Use Trie based fields instead. As of Solr 3.5 and 4.x, Trie based fields support sortMissingFirst/Last
+      
+      Plain numeric field types that store and index the text
+      value verbatim (and hence don't correctly support range queries, since the
+      lexicographic ordering isn't equal to the numeric ordering)
+    -->
+    <fieldType name="pint" class="solr.IntField"/>
+    <fieldType name="plong" class="solr.LongField"/>
+    <fieldType name="pfloat" class="solr.FloatField"/>
+    <fieldType name="pdouble" class="solr.DoubleField"/>
+    <fieldType name="pdate" class="solr.DateField" sortMissingLast="true"/>
+
+    <!-- The "RandomSortField" is not used to store or search any
+         data.  You can declare fields of this type in your schema
+         to generate pseudo-random orderings of your docs for sorting 
+         or function purposes.  The ordering is generated based on the field
+         name and the version of the index. As long as the index version
+         remains unchanged, and the same field name is reused,
+         the ordering of the docs will be consistent.  
+         If you want different pseudo-random orderings of documents,
+         for the same version of the index, use a dynamicField and
+         change the field name in the request.
+     -->
+    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
+
+    <!-- solr.TextField allows the specification of custom text analyzers
+         specified as a tokenizer and a list of token filters. Different
+         analyzers may be specified for indexing and querying.
+
+         The optional positionIncrementGap puts space between multiple fields of
+         this type on the same document, with the purpose of preventing false phrase
+         matching across fields.
+
+         For more info on customizing your analyzer chain, please see
+         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
+     -->
+
+    <!-- One can also specify an existing Analyzer class that has a
+         default constructor via the class attribute on the analyzer element.
+         Example:
+    <fieldType name="text_greek" class="solr.TextField">
+      <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
+    </fieldType>
+    -->
+
+    <!-- A text field that only splits on whitespace for exact matching of words -->
+    <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A general text field that has reasonable, generic
+         cross-language defaults: it tokenizes with StandardTokenizer,
+	 removes stop words from case-insensitive "stopwords.txt"
+	 (empty by default), and down cases.  At query time only, it
+	 also applies synonyms. -->
+    <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100" multiValued="true">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A text field with defaults appropriate for English: it
+         tokenizes with StandardTokenizer, removes English stop words
+         (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
+         finally applies Porter's stemming.  The query time analyzer
+         also applies synonyms from synonyms.txt. -->
+    <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!-- Case insensitive stop word removal.
+        -->
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+            />
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.EnglishPossessiveFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+	-->
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+            />
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.EnglishPossessiveFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+	-->
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A text field with defaults appropriate for English, plus
+	 aggressive word-splitting and autophrase features enabled.
+	 This field is just like text_en, except it adds
+	 WordDelimiterFilter to enable splitting and matching of
+	 words on case-change, alpha numeric boundaries, and
+	 non-alphanumeric chars.  This means certain compound word
+	 cases will work, for example query "wi fi" will match
+	 document "WiFi" or "wi-fi".
+        -->
+    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!-- Case insensitive stop word removal.
+        -->
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+            />
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+            />
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Less flexible matching, but fewer false matches.  Probably not ideal for product names,
+         but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
+    <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterFilter in conjunction with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Just like text_general except it reverses the characters of
+	 each token, to enable more efficient leading wildcard queries. -->
+    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
+                maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
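+    <!-- Sketch of the effect (assumed example values): the index analyzer keeps
+         each token plus a marked reversed copy, so a leading-wildcard query
+         like *house can be rewritten as a prefix query on the reversed token
+         (esuoh*), avoiding a scan over the whole term dictionary. -->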
+
+    <!-- charFilter + WhitespaceTokenizer  -->
+    <!--
+    <fieldType name="text_char_norm" class="solr.TextField" positionIncrementGap="100" >
+      <analyzer>
+        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      </analyzer>
+    </fieldType>
+    -->
+
+    <!-- This is an example of using the KeywordTokenizer along
+         with various TokenFilterFactories to produce a sortable field
+         that does not include some properties of the source text
+      -->
+    <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
+      <analyzer>
+        <!-- KeywordTokenizer does no actual tokenizing, so the entire
+             input string is preserved as a single token
+          -->
+        <tokenizer class="solr.KeywordTokenizerFactory"/>
+        <!-- The LowerCase TokenFilter does what you expect, which can be
+             useful when you want your sorting to be case insensitive
+          -->
+        <filter class="solr.LowerCaseFilterFactory" />
+        <!-- The TrimFilter removes any leading or trailing whitespace -->
+        <filter class="solr.TrimFilterFactory" />
+        <!-- The PatternReplaceFilter gives you the flexibility to use
+             Java regular expressions to replace any sequence of characters
+             matching a pattern with an arbitrary replacement string, 
+             which may include back references to portions of the original
+             string matched by the pattern.
+             
+             See the Java Regular Expression documentation for more
+             information on pattern and replacement string syntax.
+             
+             http://docs.oracle.com/javase/7/docs/api/java/util/regex/package-summary.html
+          -->
+        <filter class="solr.PatternReplaceFilterFactory"
+                pattern="([^a-z])" replacement="" replace="all"
+            />
+      </analyzer>
+    </fieldType>
+    
+    <fieldtype name="phonetic" stored="false" indexed="true" class="solr.TextField" >
+      <analyzer>
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
+      </analyzer>
+    </fieldtype>
+
+    <fieldtype name="payloads" stored="false" indexed="true" class="solr.TextField" >
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <!--
+        The DelimitedPayloadTokenFilter can put payloads on tokens... for example,
+        a token of "foo|1.4"  would be indexed as "foo" with a payload of 1.4f
+        Attributes of the DelimitedPayloadTokenFilterFactory : 
+         "delimiter" - a one character delimiter. Default is | (pipe)
+	 "encoder" - how to encode the following value into a payload
+	    float -> org.apache.lucene.analysis.payloads.FloatEncoder,
+	    integer -> o.a.l.a.p.IntegerEncoder
+	    identity -> o.a.l.a.p.IdentityEncoder
+            or a fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
+         -->
+        <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
+      </analyzer>
+    </fieldtype>
+
+    <!-- lowercases the entire field value, keeping it as a single token.  -->
+    <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.KeywordTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory" />
+      </analyzer>
+    </fieldType>
+
+    <!-- 
+      Example of using PathHierarchyTokenizerFactory at index time, so
+      queries for paths match documents at that path, or in descendant paths
+    -->
+    <fieldType name="descendent_path" class="solr.TextField">
+      <analyzer type="index">
+        <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.KeywordTokenizerFactory" />
+      </analyzer>
+    </fieldType>
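+    <!-- Worked example (hypothetical value): a document indexed with
+         "/usr/local/bin" yields the tokens /usr, /usr/local and /usr/local/bin,
+         so a query for "/usr/local" (kept whole by the KeywordTokenizer)
+         matches it along with everything else under /usr/local. -->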
+    <!-- 
+      Example of using PathHierarchyTokenizerFactory at query time, so
+      queries for paths match documents at that path, or in ancestor paths
+    -->
+    <fieldType name="ancestor_path" class="solr.TextField">
+      <analyzer type="index">
+        <tokenizer class="solr.KeywordTokenizerFactory" />
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+      </analyzer>
+    </fieldType>
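+    <!-- Conversely (same hypothetical value): here "/usr/local/bin" is indexed
+         as a single token, while a query for "/usr/local/bin" expands to /usr,
+         /usr/local and /usr/local/bin, so documents stored at any of those
+         ancestor paths match. -->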
+
+    <!-- since fields of this type are by default not stored or indexed,
+         any data added to them will be ignored outright.  --> 
+    <fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+
+    <!-- This point type indexes the coordinates as separate fields (subFields)
+      If subFieldType is defined, it references a type, and a dynamic field
+      definition is created matching *___<typename>.  Alternately, if 
+      subFieldSuffix is defined, that is used to create the subFields.
+      Example: if subFieldType="double", then the coordinates would be
+        indexed in fields myloc_0___double,myloc_1___double.
+      Example: if subFieldSuffix="_d" then the coordinates would be indexed
+        in fields myloc_0_d,myloc_1_d
+      The subFields are an implementation detail of the fieldType, and end
+      users normally should not need to know about them.
+     -->
+    <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
+
+    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
+    <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+
+    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.
+      For more information about this and other Spatial fields new to Solr 4, see:
+      http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4
+    -->
+    <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
+               geo="true" distErrPct="0.025" maxDistErr="0.000009" units="degrees" />
+
+    <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType
+        Parameters:
+          defaultCurrency: Specifies the default currency if none specified. Defaults to "USD"
+          precisionStep:   Specifies the precisionStep for the TrieLong field used for the amount
+          providerClass:   Lets you plug in other exchange provider backend:
+                           solr.FileExchangeRateProvider is the default and takes one parameter:
+                             currencyConfig: name of an xml file holding exchange rates
+                           solr.OpenExchangeRatesOrgProvider uses rates from openexchangerates.org:
+                             ratesFileLocation: URL or path to rates JSON file (default latest.json on the web)
+                             refreshInterval: Number of minutes between each rates fetch (default: 1440, min: 60)
+   -->
+    <fieldType name="currency" class="solr.CurrencyField" precisionStep="8" defaultCurrency="USD" currencyConfig="currency.xml" />
+             
+
+
+    <!-- some examples for different languages (generally ordered by ISO code) -->
+
+    <!-- Arabic -->
+    <fieldType name="text_ar" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- for any non-arabic -->
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ar.txt" />
+        <!-- normalizes ﻯ to ﻱ, etc -->
+        <filter class="solr.ArabicNormalizationFilterFactory"/>
+        <filter class="solr.ArabicStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Bulgarian -->
+    <fieldType name="text_bg" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/> 
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_bg.txt" /> 
+        <filter class="solr.BulgarianStemFilterFactory"/>       
+      </analyzer>
+    </fieldType>
+    
+    <!-- Catalan -->
+    <fieldType name="text_ca" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- removes l', etc -->
+        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_ca.txt"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ca.txt" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Catalan"/>       
+      </analyzer>
+    </fieldType>
+    
+    <!-- CJK bigram (see text_ja for a Japanese configuration using morphological analysis) -->
+    <fieldType name="text_cjk" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- normalize width before bigram, as e.g. half-width dakuten combine  -->
+        <filter class="solr.CJKWidthFilterFactory"/>
+        <!-- for any non-CJK -->
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.CJKBigramFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Czech -->
+    <fieldType name="text_cz" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_cz.txt" />
+        <filter class="solr.CzechStemFilterFactory"/>       
+      </analyzer>
+    </fieldType>
+    
+    <!-- Danish -->
+    <fieldType name="text_da" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_da.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Danish"/>       
+      </analyzer>
+    </fieldType>
+    
+    <!-- German -->
+    <fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_de.txt" format="snowball" />
+        <filter class="solr.GermanNormalizationFilterFactory"/>
+        <filter class="solr.GermanLightStemFilterFactory"/>
+        <!-- less aggressive: <filter class="solr.GermanMinimalStemFilterFactory"/> -->
+        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="German2"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Greek -->
+    <fieldType name="text_el" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- greek specific lowercase for sigma -->
+        <filter class="solr.GreekLowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="false" words="lang/stopwords_el.txt" />
+        <filter class="solr.GreekStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Spanish -->
+    <fieldType name="text_es" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_es.txt" format="snowball" />
+        <filter class="solr.SpanishLightStemFilterFactory"/>
+        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Spanish"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Basque -->
+    <fieldType name="text_eu" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_eu.txt" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Basque"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Persian -->
+    <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <!-- for ZWNJ -->
+        <charFilter class="solr.PersianCharFilterFactory"/>
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.ArabicNormalizationFilterFactory"/>
+        <filter class="solr.PersianNormalizationFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fa.txt" />
+      </analyzer>
+    </fieldType>
+    
+    <!-- Finnish -->
+    <fieldType name="text_fi" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fi.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Finnish"/>
+        <!-- less aggressive: <filter class="solr.FinnishLightStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- French -->
+    <fieldType name="text_fr" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- removes l', etc -->
+        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_fr.txt"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fr.txt" format="snowball" />
+        <filter class="solr.FrenchLightStemFilterFactory"/>
+        <!-- less aggressive: <filter class="solr.FrenchMinimalStemFilterFactory"/> -->
+        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="French"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Irish -->
+    <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- removes d', etc -->
+        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_ga.txt"/>
+        <!-- removes n-, etc. position increments is intentionally false! -->
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/hyphenations_ga.txt"/>
+        <filter class="solr.IrishLowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ga.txt"/>
+        <filter class="solr.SnowballPorterFilterFactory" language="Irish"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Galician -->
+    <fieldType name="text_gl" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_gl.txt" />
+        <filter class="solr.GalicianStemFilterFactory"/>
+        <!-- less aggressive: <filter class="solr.GalicianMinimalStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Hindi -->
+    <fieldType name="text_hi" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <!-- normalizes unicode representation -->
+        <filter class="solr.IndicNormalizationFilterFactory"/>
+        <!-- normalizes variation in spelling -->
+        <filter class="solr.HindiNormalizationFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hi.txt" />
+        <filter class="solr.HindiStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Hungarian -->
+    <fieldType name="text_hu" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hu.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Hungarian"/>
+        <!-- less aggressive: <filter class="solr.HungarianLightStemFilterFactory"/> -->   
+      </analyzer>
+    </fieldType>
+    
+    <!-- Armenian -->
+    <fieldType name="text_hy" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hy.txt" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Armenian"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Indonesian -->
+    <fieldType name="text_id" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_id.txt" />
+        <!-- for a less aggressive approach (only inflectional suffixes), set stemDerivational to false -->
+        <filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Italian -->
+    <fieldType name="text_it" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- removes l', etc -->
+        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_it.txt"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_it.txt" format="snowball" />
+        <filter class="solr.ItalianLightStemFilterFactory"/>
+        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Italian"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Japanese using morphological analysis (see text_cjk for a configuration using bigramming)
+
+         NOTE: If you want to optimize search for precision, use default operator AND in your query
+         parser config with <solrQueryParser defaultOperator="AND"/> further down in this file.  Use 
+         OR if you would like to optimize for recall (default).
+    -->
+    <fieldType name="text_ja" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="false">
+      <analyzer>
+        <!-- Kuromoji Japanese morphological analyzer/tokenizer (JapaneseTokenizer)
+
+           Kuromoji has a search mode (default) that does segmentation useful for search.  A heuristic
+           is used to segment compounds into their parts, and each compound itself is kept as a synonym.
+
+           Valid values for attribute mode are:
+              normal: regular segmentation
+              search: segmentation useful for search, with compounds kept as synonyms (default)
+            extended: same as search mode, but emits unigrams for unknown words (experimental)
+
+           For some applications it might be good to use search mode for indexing and normal mode for
+           queries, to reduce recall and prevent parts of compounds from being matched and highlighted.
+           Use <analyzer type="index"> and <analyzer type="query"> for this, with mode normal in the
+           query analyzer; a sketch follows this field type definition.
+
+           Kuromoji also has a convenient user dictionary feature that allows overriding the statistical
+           model with your own entries for segmentation, part-of-speech tags and readings without a need
+           to specify weights.  Notice that user dictionaries have not been subject to extensive testing.
+
+           User dictionary attributes are:
+                     userDictionary: user dictionary filename
+             userDictionaryEncoding: user dictionary encoding (default is UTF-8)
+
+           See lang/userdict_ja.txt for a sample user dictionary file.
+
+           Punctuation characters are discarded by default.  Use discardPunctuation="false" to keep them.
+
+           See http://wiki.apache.org/solr/JapaneseLanguageSupport for more on Japanese language support.
+        -->
+        <tokenizer class="solr.JapaneseTokenizerFactory" mode="search"/>
+        <!--<tokenizer class="solr.JapaneseTokenizerFactory" mode="search" userDictionary="lang/userdict_ja.txt"/>-->
+        <!-- Reduces inflected verbs and adjectives to their base/dictionary forms (辞書形) -->
+        <filter class="solr.JapaneseBaseFormFilterFactory"/>
+        <!-- Removes tokens with certain part-of-speech tags -->
+        <filter class="solr.JapanesePartOfSpeechStopFilterFactory" tags="lang/stoptags_ja.txt" />
+        <!-- Normalizes full-width romaji to half-width and half-width kana to full-width (Unicode NFKC subset) -->
+        <filter class="solr.CJKWidthFilterFactory"/>
+        <!-- Removes common tokens typically not useful for search, but have a negative effect on ranking -->
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ja.txt" />
+        <!-- Normalizes common katakana spelling variations by removing any last long sound character (U+30FC) -->
+        <filter class="solr.JapaneseKatakanaStemFilterFactory" minimumLength="4"/>
+        <!-- Lower-cases romaji characters -->
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
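+    <!-- A hedged sketch of the index/query mode split mentioned above (the type name
+         is hypothetical, and the filter chain is abbreviated to the width
+         normalization and lowercasing steps):
+      <fieldType name="text_ja_split" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="false">
+        <analyzer type="index">
+          <tokenizer class="solr.JapaneseTokenizerFactory" mode="search"/>
+          <filter class="solr.CJKWidthFilterFactory"/>
+          <filter class="solr.LowerCaseFilterFactory"/>
+        </analyzer>
+        <analyzer type="query">
+          <tokenizer class="solr.JapaneseTokenizerFactory" mode="normal"/>
+          <filter class="solr.CJKWidthFilterFactory"/>
+          <filter class="solr.LowerCaseFilterFactory"/>
+        </analyzer>
+      </fieldType>
+      -->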
+    
+    <!-- Latvian -->
+    <fieldType name="text_lv" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_lv.txt" />
+        <filter class="solr.LatvianStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Dutch -->
+    <fieldType name="text_nl" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_nl.txt" format="snowball" />
+        <filter class="solr.StemmerOverrideFilterFactory" dictionary="lang/stemdict_nl.txt" ignoreCase="false"/>
+        <filter class="solr.SnowballPorterFilterFactory" language="Dutch"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Norwegian -->
+    <fieldType name="text_no" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_no.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Norwegian"/>
+        <!-- less aggressive: <filter class="solr.NorwegianLightStemFilterFactory"/> -->
+        <!-- singular/plural: <filter class="solr.NorwegianMinimalStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Portuguese -->
+    <fieldType name="text_pt" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_pt.txt" format="snowball" />
+        <filter class="solr.PortugueseLightStemFilterFactory"/>
+        <!-- less aggressive: <filter class="solr.PortugueseMinimalStemFilterFactory"/> -->
+        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Portuguese"/> -->
+        <!-- most aggressive: <filter class="solr.PortugueseStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Romanian -->
+    <fieldType name="text_ro" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ro.txt" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Romanian"/>
+      </analyzer>
+    </fieldType>
+    
+    <!-- Russian -->
+    <fieldType name="text_ru" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ru.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Russian"/>
+        <!-- less aggressive: <filter class="solr.RussianLightStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Swedish -->
+    <fieldType name="text_sv" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_sv.txt" format="snowball" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Swedish"/>
+        <!-- less aggressive: <filter class="solr.SwedishLightStemFilterFactory"/> -->
+      </analyzer>
+    </fieldType>
+    
+    <!-- Thai -->
+    <fieldType name="text_th" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.ThaiWordFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_th.txt" />
+      </analyzer>
+    </fieldType>
+    
+    <!-- Turkish -->
+    <fieldType name="text_tr" class="solr.TextField" positionIncrementGap="100">
+      <analyzer> 
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.TurkishLowerCaseFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="false" words="lang/stopwords_tr.txt" />
+        <filter class="solr.SnowballPorterFilterFactory" language="Turkish"/>
+      </analyzer>
+    </fieldType>
+
+  </types>
+  
+  <!-- Similarity is the scoring routine for each document vs. a query.
+       A custom Similarity or SimilarityFactory may be specified here, but 
+       the default is fine for most applications.  
+       For more info: http://wiki.apache.org/solr/SchemaXml#Similarity
+    -->
+  <!--
+     <similarity class="com.example.solr.CustomSimilarityFactory">
+       <str name="paramkey">param value</str>
+     </similarity>
+    -->
+
+</schema>
diff --git a/solr/example/example-schemaless/solr/collection1/conf/solrconfig.xml b/solr/example/example-schemaless/solr/collection1/conf/solrconfig.xml
new file mode 100755
index 0000000..e6f9522
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/solrconfig.xml
@@ -0,0 +1,1888 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- 
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml. 
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>LUCENE_50</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified, and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked" 
+       on top of each other when building a ClassLoader - so if you have 
+       plugin jars with dependencies on other jars, the "lower level" 
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+       
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory 
+       to the classpath; this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along 
+       with their external dependencies.
+    -->
+  <lib dir="../../../contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="../../../dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="../../../contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="../../../dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="../../../contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="../../../dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="../../../contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="../../../dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a 
+       specific jar file.  This will cause a serious error to be logged 
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" /> 
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+       
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based, not
+       persistent, and doesn't work with replication.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
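+  <!-- Since the class above is resolved from the solr.directoryFactory system
+       property, a specific implementation can be forced at startup, e.g.:
+         java -Dsolr.directoryFactory=solr.MMapDirectoryFactory -jar start.jar
+    -->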
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- To disable dynamic schema REST APIs, use the following for <schemaFactory>:
+  
+       <schemaFactory class="ClassicIndexSchemaFactory"/>
+
+       When ManagedIndexSchemaFactory is specified instead, Solr will load the schema from
+       the resource named in 'managedSchemaResourceName', rather than from schema.xml.
+       Note that the managed schema resource CANNOT be named schema.xml.  If the managed
+       schema does not exist, Solr will create it after reading schema.xml, then rename
+       'schema.xml' to 'schema.xml.bak'. 
+       
+       Do NOT hand edit the managed schema - external modifications will be ignored and
+       overwritten as a result of schema modification REST API calls.
+
+       When ManagedIndexSchemaFactory is specified with mutable = true, schema
+       modification REST API calls will be allowed; otherwise, error responses will be
+       sent back for these requests. 
+  -->
+  <schemaFactory class="ManagedIndexSchemaFactory">
+    <bool name="mutable">true</bool>
+    <str name="managedSchemaResourceName">managed-schema</str>
+  </schemaFactory>
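+  <!-- With mutable=true, the schema can be inspected and extended over HTTP via
+       the Schema REST API.  A hedged curl sketch (URL, field name, and field type
+       assume the stock example setup and are illustrative only):
+         curl http://localhost:8983/solr/schema/fields
+         curl -X PUT http://localhost:8983/solr/schema/fields/myfield \
+              -H 'Content-type:application/json' \
+              -d '{"type":"text_general","stored":true}'
+    -->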
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+       
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a 
+         LimitTokenCountFilterFactory in your fieldType definition. E.g. 
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- The maximum number of simultaneous threads that may be
+         indexing documents at once in IndexWriter; if more than this
+         many threads arrive they will wait for others to finish.
+         Default in Solr/Lucene is 8. -->
+    <!-- <maxIndexingThreads>8</maxIndexingThreads>  -->
+
+    <!-- Expert: Enabling compound file will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy 
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was LogByteSizeMergePolicy;
+         even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicy class="org.apache.lucene.index.TieredMergePolicy">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+        </mergePolicy>
+      -->
+
+    <!-- Merge Factor
+         The merge factor controls how many segments will get merged at a time.
+         For TieredMergePolicy, mergeFactor is a convenience parameter which
+         will set both MaxMergeAtOnce and SegmentsPerTier at once.
+         For LogByteSizeMergePolicy, mergeFactor decides how many new segments
+         will be allowed before they are merged into one.
+         Default is 10 for both merge policies.
+      -->
+    <!-- 
+    <mergeFactor>10</mergeFactor>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!-- 
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory 
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+      
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is the default for Solr 3.6 and later;
+                   otherwise 'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
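+    <!-- As with the directory factory, the property syntax above means the lock
+         type can be overridden per run, e.g. by starting Solr with
+         -Dsolr.lock.type=simple (a hypothetical override, not a recommendation).
+      -->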
+
+    <!-- Unlock On Startup
+
+         If true, unlock any held write or commit locks on startup.
+         This defeats the locking mechanism that allows multiple
+         processes to safely access a lucene index, and should be used
+         with care. Default is "false".
+
+         This is not needed if lock type is 'single'
+     -->
+    <!--
+    <unlockOnStartup>false</unlockOnStartup>
+      -->
+
+    <!-- Expert: Controls how often Lucene loads terms into memory
+         Default is 128 and is likely good for most everyone.
+      -->
+    <!-- <termIndexInterval>128</termIndexInterval> -->
+
+    <!-- If true, IndexReaders will be reopened (often more efficient)
+         instead of closed and then opened. Default: true
+      -->
+    <!-- 
+    <reopenReaders>true</reopenReaders>
+      -->
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+         
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!-- 
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+    <!-- The number of commit points to be kept -->
+    <!-- <str name="maxCommitsToKeep">1</str> -->
+    <!-- The number of optimized commit points to be kept -->
+    <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+    <!--
+        Delete all commit points once they have reached the given age.
+        Supports DateMathParser syntax e.g.
+      -->
+    <!--
+       <str name="maxCommitAge">30MINUTES</str>
+       <str name="maxCommitAge">1DAY</str>
+    -->
+    <!-- 
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+       
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its debugging info to the specified file
+      -->
+    <!-- <infoStream file="INFOSTREAM.txt">false</infoStream> -->
+  </indexConfig>
+
+
+  <!-- JMX
+       
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId 
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability,
+         and SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.  -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents. 
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit. 
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+    <autoCommit>
+      <maxTime>15000</maxTime>
+      <openSearcher>false</openSearcher>
+    </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+    <!--
+      <autoSoftCommit> 
+        <maxTime>1000</maxTime> 
+      </autoSoftCommit>
+     -->
+
+    <!-- Update Related Event Listeners
+         
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+    <!-- The RunExecutableListener executes an external command from a
+         hook such as postCommit or postOptimize.
+         
+         exe - the name of the executable to run
+         dir - dir to use as the current working directory. (default=".")
+         wait - the calling thread waits until the executable returns. 
+                (default="true")
+         args - the arguments to pass to the program.  (default is none)
+         env - environment variables to set.  (default is none)
+      -->
+    <!-- This example shows how RunExecutableListener could be used
+         with the script based replication...
+         http://wiki.apache.org/solr/CollectionDistribution
+      -->
+    <!--
+       <listener event="postCommit" class="solr.RunExecutableListener">
+         <str name="exe">solr/bin/snapshooter</str>
+         <str name="dir">.</str>
+         <bool name="wait">true</bool>
+         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
+         <arr name="env"> <str>MYVAR=val1</str> </arr>
+       </listener>
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+  <!-- By explicitly declaring the Factory, the termIndexDivisor can
+       be specified.
+    -->
+  <!--
+     <indexReaderFactory name="IndexReaderFactory" 
+                         class="solr.StandardIndexReaderFactory">
+       <int name="setTermIndexDivisor">12</int>
+     </indexReaderFactory >
+    -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+    <!-- Max Boolean Clauses
+
+         Maximum number of clauses in each BooleanQuery; an exception
+         is thrown if exceeded.
+
+         ** WARNING **
+         
+         This option actually modifies a global Lucene property that
+         will affect all SolrCores.  If multiple solrconfig.xml files
+         disagree on this property, the value at any given moment will
+         be based on the last SolrCore to be initialized.
+         
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.  
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+         
+         Caches results of searches - ordered lists of document ids
+         (DocList) based on a query, a sort, and the range of documents requested.  
+      -->
+    <queryResultCache class="solr.LRUCache"
+                      size="512"
+                      initialSize="512"
+                      autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.  
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- Field Value Cache
+         
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator 
+         if autowarming is desired.  
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+    <!-- Use Filter For Sorted Query
+ 
+         A possible optimization that attempts to use a filter to
+         satisfy a search.  If the requested sort does not include
+         score, then the filterCache will be checked for a filter
+         matching the query. If found, the filter will be used as the
+         source of document ids, and then the sort will be applied to
+         that.
+ 
+         For most situations, this will not be useful unless you
+         frequently get the same search repeatedly with different sort
+         options, and none of them ever use "score"
+      -->
+    <!--
+       <useFilterForSortedQuery>true</useFilterForSortedQuery>
+      -->
+
+    <!-- Result Window Size
+ 
+         An optimization for use with the queryResultCache.  When a search
+         is requested, a superset of the requested number of document ids
+         are collected.  For example, if a search for a particular query
+         requests matching documents 10 through 19, and queryResultWindowSize is 50,
+         then documents 0 through 49 will be collected and cached.  Any further
+         requests in that range can be satisfied via the cache.  
+      -->
+    <queryResultWindowSize>20</queryResultWindowSize>
+
+    <!-- Maximum number of documents to cache for any entry in the
+         queryResultCache. 
+      -->
+    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+    <!-- Query Related Event Listeners
+ 
+         Various IndexSearcher related events can trigger Listeners to
+         take actions.
+ 
+         newSearcher - fired whenever a new searcher is being prepared
+         and there is a current searcher handling requests (aka
+         registered).  It can be used to prime certain caches to
+         prevent long request times for certain requests.
+ 
+         firstSearcher - fired whenever a new searcher is being
+         prepared but there is no current registered searcher to handle
+         requests or to gain autowarming data from.
+
+      -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence. 
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+        -->
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+    <!-- Max Warming Searchers
+         
+         Maximum number of searchers that may be warming in the
+         background concurrently.  An error is returned if this limit
+         is exceeded.
+
+         Recommended values are 1-2 for read-only slaves, higher for
+         masters w/o cache warming.
+      -->
+    <maxWarmingSearchers>2</maxWarmingSearchers>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+       handleSelect is a legacy option that affects the behavior of requests
+       such as /select?qt=XXX
+
+       handleSelect="true" will cause the SolrDispatchFilter to process
+       the request and dispatch the query to a handler specified by the 
+       "qt" param, assuming "/select" isn't already registered.
+
+       handleSelect="false" will cause the SolrDispatchFilter to
+       ignore "/select" requests, resulting in a 404 unless a handler
+       is explicitly registered with the name "/select"
+
+       handleSelect="true" is not recommended for new users, but is the default
+       for backwards compatibility
+    -->
+  <requestDispatcher handleSelect="false" >
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+         
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+         
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the 
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom 
+         plugins.
+         
+         *** WARNING ***
+         The settings below authorize Solr to fetch remote files.  You
+         should make sure your system has some authentication before
+         using enableRemoteStreaming="true"
+
+      -->
+    <requestParsers enableRemoteStreaming="true"
+                    multipartUploadLimitInKB="2048000"
+                    formdataUploadLimitInKB="2048"
+                    addHttpRequestToContext="false"/>
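+    <!-- With remote streaming enabled, an update request can pull its body from a
+         local file or URL via the stream.file / stream.url parameters.  A hedged
+         sketch (the file path is a placeholder):
+           curl 'http://localhost:8983/solr/update?stream.file=/tmp/docs.xml&stream.contentType=text/xml&commit=true'
+      -->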
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+         
+         By default, no Cache-Control header is generated.
+         
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl> 
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+         
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl> 
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers 
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       Legacy behavior: If the request path uses "/select" but no Request
+       Handler has that name, and if handleSelect="true" has been specified in
+       the requestDispatcher, then the Request Handler is dispatched based on
+       the qt parameter.  Handlers without a leading '/' are accessed
+       like so: http://host/app/[core/]select?qt=name  If no qt is
+       given, then the requestHandler that declares default="true" will be
+       used or the one named "standard".
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler".  It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards.
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <int name="rows">10</int>
+      <!-- <str name="df">text</str> -->
+    </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+  </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="wt">json</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+  </requestHandler>
+
+
+  <!-- realtime get handler, guaranteed to return the latest stored fields of
+       any document, without the need to commit or open a new searcher.  The
+       current implementation relies on the updateLog feature being enabled. -->
+  <requestHandler name="/get" class="solr.RealTimeGetHandler">
+    <lst name="defaults">
+      <str name="omitHeader">true</str>
+      <str name="wt">json</str>
+      <str name="indent">true</str>
+    </lst>
+  </requestHandler>
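+  <!-- A hedged usage sketch for the handler above (the document id is
+       hypothetical):
+         curl 'http://localhost:8983/solr/get?id=mydoc'
+    -->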
+
+
+  <!-- A Robust Example 
+       
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that multiple instances of the same Request Handler
+       (SearchHandler) can be registered multiple times with different
+       names (and different init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+
+      <!-- VelocityResponseWriter settings -->
+      <str name="wt">velocity</str>
+      <str name="v.template">browse</str>
+      <str name="v.layout">layout</str>
+      <str name="title">Solritas</str>
+
+      <!-- Query settings -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+        title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+      </str>
+      <str name="df">text</str>
+      <str name="mm">100%</str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">10</str>
+      <str name="fl">*,score</str>
+
+      <str name="mlt.qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+        title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+      </str>
+      <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+      <int name="mlt.count">3</int>
+
+      <!-- Faceting defaults -->
+      <str name="facet">on</str>
+      <str name="facet.field">cat</str>
+      <str name="facet.field">manu_exact</str>
+      <str name="facet.field">content_type</str>
+      <str name="facet.field">author_s</str>
+      <str name="facet.query">ipod</str>
+      <str name="facet.query">GB</str>
+      <str name="facet.mincount">1</str>
+      <str name="facet.pivot">cat,inStock</str>
+      <str name="facet.range.other">after</str>
+      <str name="facet.range">price</str>
+      <int name="f.price.facet.range.start">0</int>
+      <int name="f.price.facet.range.end">600</int>
+      <int name="f.price.facet.range.gap">50</int>
+      <str name="facet.range">popularity</str>
+      <int name="f.popularity.facet.range.start">0</int>
+      <int name="f.popularity.facet.range.end">10</int>
+      <int name="f.popularity.facet.range.gap">3</int>
+      <str name="facet.range">manufacturedate_dt</str>
+      <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+      <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+      <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+      <str name="f.manufacturedate_dt.facet.range.other">before</str>
+      <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+      <!-- Highlighting defaults -->
+      <str name="hl">on</str>
+      <str name="hl.fl">content features title name</str>
+      <str name="hl.encoder">html</str>
+      <str name="hl.simple.pre">&lt;b&gt;</str>
+      <str name="hl.simple.post">&lt;/b&gt;</str>
+      <str name="f.title.hl.fragsize">0</str>
+      <str name="f.title.hl.alternateField">title</str>
+      <str name="f.name.hl.fragsize">0</str>
+      <str name="f.name.hl.alternateField">name</str>
+      <str name="f.content.hl.snippets">3</str>
+      <str name="f.content.hl.fragsize">200</str>
+      <str name="f.content.hl.alternateField">content</str>
+      <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+      <!-- Spell checking defaults -->
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">false</str>
+      <str name="spellcheck.count">5</str>
+      <str name="spellcheck.alternativeTermCount">2</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">5</str>
+      <str name="spellcheck.maxCollations">3</str>
+    </lst>
+
+    <!-- append spellchecking to our list of components -->
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
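+  <!-- Illustrative usage (assumes the stock example data has been indexed):
+       visiting http://localhost:8983/solr/collection1/browse renders a
+       simple Velocity-based search UI using the defaults above
+    -->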
+
+
+  <!-- Update Request Handler.  
+       
+       http://wiki.apache.org/solr/UpdateXmlMessages
+
+       The canonical Request Handler for Modifying the Index through
+       commands specified using XML, JSON, CSV, or JAVABIN
+
+       Note: Since Solr 1.1, request handlers require a valid content
+       type header if data is posted in the body. For example, curl
+       now requires: -H 'Content-type:text/xml; charset=utf-8'
+       
+       To override the request content type and force a specific 
+       Content-type, use the request parameter: 
+         ?update.contentType=text/csv
+       
+       This handler will pick a response format to match the input
+       if the 'wt' parameter is not explicit
+    -->
+  <requestHandler name="/update" class="solr.UpdateRequestHandler">
+    <!-- See below for information on defining 
+         updateRequestProcessorChains that can be used by name 
+         on each Update Request
+      -->
+    <lst name="defaults">
+      <str name="update.chain">add-unknown-fields-to-the-schema</str>
+    </lst>
+  </requestHandler>
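+  <!-- Illustrative usage (a sketch with hypothetical field values; note the
+       Content-type header discussed above):
+       curl 'http://localhost:8983/solr/update?commit=true'
+            -H 'Content-type:text/xml'
+            -d '<add><doc><field name="id">doc1</field></doc></add>'
+    -->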
+
+  <!-- for back compat with clients using /update/json and /update/csv -->
+  <requestHandler name="/update/json" class="solr.JsonUpdateRequestHandler">
+    <lst name="defaults">
+      <str name="stream.contentType">application/json</str>
+      <str name="update.chain">add-unknown-fields-to-the-schema</str>
+    </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.CSVRequestHandler">
+    <lst name="defaults">
+      <str name="stream.contentType">application/csv</str>
+      <str name="update.chain">add-unknown-fields-to-the-schema</str>
+    </lst>
+  </requestHandler>
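+  <!-- Illustrative usage of the CSV handler (hypothetical values, using the
+       stream.body param to carry the CSV payload):
+       /update/csv?stream.body=id,name%0A1,doc1&commit=true
+    -->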
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler 
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
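+  <!-- Illustrative usage (hypothetical local file; Tika extracts the content
+       and the literal.id param supplies the unique key):
+       curl 'http://localhost:8983/solr/update/extract?literal.id=doc1&commit=true'
+            -F 'myfile=@tutorial.pdf'
+    -->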
+
+
+  <!-- Field Analysis Request Handler
+
+       RequestHandler that provides much the same functionality as
+       analysis.jsp. Provides the ability to specify multiple field
+       types and field names in the same request and outputs
+       index-time and query-time analysis for each of them.
+
+       Request parameters are:
+       analysis.fieldname - field name whose analyzers are to be used
+
+       analysis.fieldtype - field type whose analyzers are to be used
+       analysis.fieldvalue - text for index-time analysis
+       q (or analysis.q) - text for query time analysis
+       analysis.showmatch (true|false) - When set to true and when
+           query analysis is performed, the tokens produced by the
+           field value analysis will be marked as "matched" for every
+           token that is produced by the query analysis
+   -->
+  <requestHandler name="/analysis/field"
+                  startup="lazy"
+                  class="solr.FieldAnalysisRequestHandler" />
+
+
+  <!-- Document Analysis Handler
+
+       http://wiki.apache.org/solr/AnalysisRequestHandler
+
+       An analysis handler that provides a breakdown of the analysis
+       process of provided documents. This handler expects a (single)
+       content stream with the following format:
+
+       <docs>
+         <doc>
+           <field name="id">1</field>
+           <field name="name">The Name</field>
+           <field name="text">The Text Value</field>
+         </doc>
+         <doc>...</doc>
+         <doc>...</doc>
+         ...
+       </docs>
+
+    Note: Each document must contain a field which serves as the
+    unique key. This key is used in the returned response to associate
+    an analysis breakdown with the analyzed document.
+
+    Like the FieldAnalysisRequestHandler, this handler also supports
+    query analysis by sending either an "analysis.query" or "q"
+    request parameter that holds the query text to be analyzed. It
+    also supports the "analysis.showmatch" parameter; when set to
+    true, all field tokens that match the query tokens will be marked
+    as a "match".
+  -->
+  <requestHandler name="/analysis/document"
+                  class="solr.DocumentAnalysisRequestHandler"
+                  startup="lazy" />
+
+  <!-- Admin Handlers
+
+       This will register all the standard admin RequestHandlers.
+    -->
+  <requestHandler name="/admin/"
+                  class="solr.admin.AdminHandlers" />
+  <!-- This single handler is equivalent to the following... -->
+  <!--
+     <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />
+     <requestHandler name="/admin/system"     class="solr.admin.SystemInfoHandler" />
+     <requestHandler name="/admin/plugins"    class="solr.admin.PluginInfoHandler" />
+     <requestHandler name="/admin/threads"    class="solr.admin.ThreadDumpHandler" />
+     <requestHandler name="/admin/properties" class="solr.admin.PropertiesRequestHandler" />
+     <requestHandler name="/admin/file"       class="solr.admin.ShowFileRequestHandler" >
+    -->
+  <!-- If you wish to hide files under ${solr.home}/conf, explicitly
+       register the ShowFileRequestHandler using: 
+    -->
+  <!--
+     <requestHandler name="/admin/file" 
+                     class="solr.admin.ShowFileRequestHandler" >
+       <lst name="invariants">
+         <str name="hidden">synonyms.txt</str> 
+         <str name="hidden">anotherfile.txt</str> 
+       </lst>
+     </requestHandler>
+    -->
+
+  <!-- ping/healthcheck -->
+  <requestHandler name="/admin/ping" class="solr.PingRequestHandler">
+    <lst name="invariants">
+      <str name="q">solrpingquery</str>
+    </lst>
+    <lst name="defaults">
+      <str name="echoParams">all</str>
+    </lst>
+    <!-- An optional feature of the PingRequestHandler is to configure the 
+         handler with a "healthcheckFile" which can be used to enable/disable 
+         the handler at runtime.
+         Relative paths are resolved against the data dir.
+      -->
+    <!-- <str name="healthcheckFile">server-enabled.txt</str> -->
+  </requestHandler>
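+  <!-- e.g. a GET on /admin/ping reports status "OK" while the core can
+       execute the ping query; with a healthcheckFile configured, removing
+       the file makes the handler report the service as disabled instead -->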
+
+  <!-- Echo the request contents back to the client -->
+  <requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="echoHandler">true</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Solr Replication
+
+       The SolrReplicationHandler supports replicating indexes from a
+       "master" used for indexing and "slaves" used for queries.
+
+       http://wiki.apache.org/solr/SolrReplication 
+
+       It is also necessary for SolrCloud to function (in Cloud mode, the
+       replication handler is used to bulk transfer segments when nodes 
+       are added or need to recover).
+
+       https://wiki.apache.org/solr/SolrCloud/
+    -->
+  <requestHandler name="/replication" class="solr.ReplicationHandler" >
+    <!--
+       To enable simple master/slave replication, uncomment one of the 
+       sections below, depending on whether this solr instance should be
+       the "master" or a "slave".  If this instance is a "slave" you will 
+       also need to fill in the masterUrl to point to a real machine.
+    -->
+    <!--
+       <lst name="master">
+         <str name="replicateAfter">commit</str>
+         <str name="replicateAfter">startup</str>
+         <str name="confFiles">schema.xml,stopwords.txt</str>
+       </lst>
+    -->
+    <!--
+       <lst name="slave">
+         <str name="masterUrl">http://your-master-hostname:8983/solr</str>
+         <str name="pollInterval">00:00:60</str>
+       </lst>
+    -->
+  </requestHandler>
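+  <!-- Illustrative usage (works without any master/slave config):
+       /replication?command=indexversion   report the served index version
+       /replication?command=details        report replication details
+    -->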
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by 
+       instances of SearchHandler (which can access them by name)
+       
+       By default, the following components are available:
+       
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+   
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names, 
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+    
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+    
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components" 
+       
+     -->
+
+  <!-- Spell Check
+
+       The spell check component can return a list of alternative spelling
+       suggestions.  
+
+       http://wiki.apache.org/solr/SpellCheckComponent
+    -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used; the default is the internal Levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term may appear in to still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+      	<float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.  
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+       
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
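+  <!-- Illustrative usage (a deliberately misspelled query; suggestions and
+       collations come back from both dictionaries configured above):
+       /spell?q=hell+ultrashar&wt=xml
+    -->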
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your 
+       already specified request handlers. 
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="df">text</str>
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
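+  <!-- Illustrative usage (only meaningful for fields indexed with
+       termVectors="true" in the schema):
+       /tvrh?q=*:*&fl=id&tv.tf=true&tv.positions=true
+    -->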
+
+  <!-- Clustering Component
+
+       http://wiki.apache.org/solr/ClusteringComponent
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+
+            java -Dsolr.clustering.enabled=true -jar start.jar
+
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!-- Declare an engine -->
+    <lst name="engine">
+      <!-- The engine name; only one engine can be named "default" -->
+      <str name="name">default</str>
+
+      <!-- Class name of Carrot2 clustering algorithm.
+
+           Currently available algorithms are:
+           
+           * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+           * org.carrot2.clustering.stc.STCClusteringAlgorithm
+           * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+           
+           See http://project.carrot2.org/algorithms.html for each
+           algorithm's characteristics.
+        -->
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+
+      <!-- Overriding values for Carrot2 default algorithm attributes.
+
+           For a description of all available attributes, see:
+           http://download.carrot2.org/stable/manual/#chapter.components.
+           Use the attribute key as the name attribute of the str
+           elements below. These can be further overridden for
+           individual requests by specifying the attribute key as the
+           request parameter name and the attribute value as the
+           parameter value.
+        -->
+      <str name="LingoClusteringAlgorithm.desiredClusterCountBase">20</str>
+
+      <!-- Location of Carrot2 lexical resources.
+
+           A directory from which to load Carrot2-specific stop words
+           and stop labels. Absolute or relative to Solr config directory.
+           If a specific resource (e.g. stopwords.en) is present in the
+           specified dir, it will completely override the corresponding
+           default one that ships with Carrot2.
+
+           For an overview of Carrot2 lexical resources, see:
+           http://download.carrot2.org/head/manual/#chapter.lexical-resources
+        -->
+      <str name="carrot.lexicalResourcesDir">clustering/carrot2</str>
+
+      <!-- The language to assume for the documents.
+
+           For a list of allowed values, see:
+           http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
+       -->
+      <str name="MultilingualClustering.defaultLanguage">ENGLISH</str>
+    </lst>
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your 
+       already specified request handlers. 
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <str name="clustering.engine">default</str>
+      <bool name="clustering.results">true</bool>
+      <!-- The title field -->
+      <str name="carrot.title">name</str>
+      <str name="carrot.url">id</str>
+      <!-- The field to cluster on -->
+      <str name="carrot.snippet">features</str>
+      <!-- produce summaries -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">10</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
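+  <!-- Illustrative usage (requires starting with
+       java -Dsolr.clustering.enabled=true -jar start.jar as noted above):
+       /clustering?q=*:*&rows=100
+    -->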
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
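+  <!-- Illustrative usage (hypothetical prefix):
+       /terms?terms.fl=name&terms.prefix=i&terms.limit=10
+    -->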
+
+
+  <!-- Query Elevation Component
+
+       http://wiki.apache.org/solr/QueryElevationComponent
+
+       A search component that enables you to configure the top
+       results for a given query regardless of the normal Lucene
+       scoring.
+    -->
+  <searchComponent name="elevator" class="solr.QueryElevationComponent" >
+    <!-- pick a fieldType to analyze queries -->
+    <str name="queryFieldType">string</str>
+    <str name="config-file">elevate.xml</str>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="df">text</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
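+  <!-- Illustrative usage (assumes the query appears in elevate.xml, as
+       "ipod" does in the stock example config):
+       /elevate?q=ipod&enableElevation=true&forceElevation=true
+    -->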
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter 
+           (for sentence extraction) 
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!-- 
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD (default), LINE, or SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object, -->
+          <!-- which in turn is used to get an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  
+  <!-- Add unknown fields to the schema 
+  
+       An example field type guessing update processor that will
+       attempt to parse string-typed field values as Booleans, Longs,
+       Doubles, or Dates, and then add schema fields with the guessed
+       field types.  
+       
+       This requires that the schema is both managed and mutable, by
+       declaring schemaFactory as ManagedIndexSchemaFactory, with
+       mutable specified as true. 
+       
+       See http://wiki.apache.org/solr/GuessingFieldTypes
+    -->
+  <updateRequestProcessorChain name="add-unknown-fields-to-the-schema">
+    <processor class="solr.RemoveBlankFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseBooleanFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseLongFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDoubleFieldUpdateProcessorFactory"/>
+    <processor class="solr.ParseDateFieldUpdateProcessorFactory">
+      <arr name="format">
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd'T'HH:mm:ssZ</str>
+        <str>yyyy-MM-dd'T'HH:mm:ss</str>
+        <str>yyyy-MM-dd'T'HH:mmZ</str>
+        <str>yyyy-MM-dd'T'HH:mm</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSSZ</str>
+        <str>yyyy-MM-dd HH:mm:ss.SSS</str>
+        <str>yyyy-MM-dd HH:mm:ss,SSS</str>
+        <str>yyyy-MM-dd HH:mm:ssZ</str>
+        <str>yyyy-MM-dd HH:mm:ss</str>
+        <str>yyyy-MM-dd HH:mmZ</str>
+        <str>yyyy-MM-dd HH:mm</str>
+        <str>yyyy-MM-dd</str>
+      </arr>
+    </processor>
+    <processor class="solr.AddSchemaFieldsUpdateProcessorFactory">
+      <str name="defaultFieldType">text_general</str>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Boolean</str>
+        <str name="fieldType">booleans</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.util.Date</str>
+        <str name="fieldType">tdates</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Long</str>
+        <str name="valueClass">java.lang.Integer</str>
+        <str name="fieldType">tlongs</str>
+      </lst>
+      <lst name="typeMapping">
+        <str name="valueClass">java.lang.Number</str>
+        <str name="fieldType">tdoubles</str>
+      </lst>
+    </processor>
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
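+  <!-- Illustrative run through the chain above (hypothetical values): posting
+       {"id":"1","pages":42,"price":12.98} as JSON would leave "id" alone,
+       parse "pages" as a Long (mapped to fieldType tlongs) and "price" as a
+       Double (matched by the java.lang.Number mapping to tdoubles), and add
+       the two new fields to the managed schema before the document is indexed.
+    -->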
+
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.  
+       
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+  <!--
+   <updateRequestProcessorChain name="langid">
+     <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+       <str name="langid.fl">text,title,subject,description</str>
+       <str name="langid.langField">language_s</str>
+       <str name="langid.fallback">en</str>
+     </processor>
+     <processor class="solr.LogUpdateProcessorFactory" />
+     <processor class="solr.RunUpdateProcessorFactory" />
+   </updateRequestProcessorChain>
+  -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml" 
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+    <!-- For the purposes of the tutorial, JSON responses are written as
+     plain text so that they are easy to read in *any* browser.
+     If you expect a MIME type of "application/json", just remove this override.
+    -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy"/>
+
+
+  <!-- XSLT response writer transforms the XML output using any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked for
+       every xsltCacheLifetimeSeconds seconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
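+  <!-- Illustrative usage (example.xsl ships in the example conf/xslt dir):
+       /select?q=*:*&wt=xslt&tr=example.xsl
+    -->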
+
+  <!-- Query Parsers
+
+       http://wiki.apache.org/solr/SolrQuerySyntax
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc" 
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+     
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+     
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+
+  <!-- Legacy config for the admin interface -->
+  <admin>
+    <defaultQuery>*:*</defaultQuery>
+  </admin>
+
+</config>
diff --git a/solr/example/example-schemaless/solr/collection1/conf/stopwords.txt b/solr/example/example-schemaless/solr/collection1/conf/stopwords.txt
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/stopwords.txt
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/solr/example/example-schemaless/solr/collection1/conf/synonyms.txt b/solr/example/example-schemaless/solr/collection1/conf/synonyms.txt
new file mode 100644
index 0000000..7f72128
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/conf/synonyms.txt
@@ -0,0 +1,29 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+#some test synonym mappings unlikely to appear in real input text
+aaafoo => aaabar
+bbbfoo => bbbfoo bbbbar
+cccfoo => cccbar cccbaz
+fooaaa,baraaa,bazaaa
+
+# Some synonym groups specific to this example
+GB,gib,gigabyte,gigabytes
+MB,mib,megabyte,megabytes
+Television, Televisions, TV, TVs
+#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#after us won't split it into two words.
+
+# Synonym mappings can be used for spelling correction too
+pixima => pixma
+
diff --git a/solr/example/example-schemaless/solr/collection1/core.properties b/solr/example/example-schemaless/solr/collection1/core.properties
new file mode 100644
index 0000000..bc0cf7d
--- /dev/null
+++ b/solr/example/example-schemaless/solr/collection1/core.properties
@@ -0,0 +1 @@
+name=collection1
\ No newline at end of file
diff --git a/solr/example/resources/log4j.properties b/solr/example/resources/log4j.properties
index 93dc62a..f33fa71 100644
--- a/solr/example/resources/log4j.properties
+++ b/solr/example/resources/log4j.properties
@@ -1,4 +1,5 @@
 #  Logging level
+solr.log=logs/
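+# (the log directory can be overridden at startup if desired, e.g.
+#  java -Dsolr.log=/var/solr/logs -jar start.jar)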
 log4j.rootLogger=INFO, file, CONSOLE
 
 log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
@@ -12,8 +13,12 @@
 log4j.appender.file.MaxBackupIndex=9
 
 #- File to log to and log format
-log4j.appender.file.File=logs/solr.log
+log4j.appender.file.File=${solr.log}/solr.log
 log4j.appender.file.layout=org.apache.log4j.PatternLayout
 log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
 
-log4j.logger.org.apache.zookeeper=WARN
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
+
+# set to INFO to enable infostream log messages
+log4j.logger.org.apache.solr.update.LoggingInfoStream=OFF
diff --git a/solr/example/solr/collection1/conf/schema.xml b/solr/example/solr/collection1/conf/schema.xml
index 6f26f2c..9cdd297 100755
--- a/solr/example/solr/collection1/conf/schema.xml
+++ b/solr/example/solr/collection1/conf/schema.xml
@@ -1031,8 +1031,9 @@
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_no.txt" format="snowball" />
         <filter class="solr.SnowballPorterFilterFactory" language="Norwegian"/>
-        <!-- less aggressive: <filter class="solr.NorwegianLightStemFilterFactory"/> -->
-        <!-- singular/plural: <filter class="solr.NorwegianMinimalStemFilterFactory"/> -->
+        <!-- less aggressive: <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/> -->
+        <!-- singular/plural: <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/> -->
+        <!-- The "light" and "minimal" stemmers support variants: nb=Bokmål, nn=Nynorsk, no=Both -->
       </analyzer>
     </fieldType>
     
diff --git a/solr/example/solr/collection1/conf/solrconfig.xml b/solr/example/solr/collection1/conf/solrconfig.xml
index c403ebe..1fc3dc4 100755
--- a/solr/example/solr/collection1/conf/solrconfig.xml
+++ b/solr/example/solr/collection1/conf/solrconfig.xml
@@ -66,6 +66,9 @@
        files in that directory which completely match the regex
        (anchored on both ends) will be included.
 
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
        The examples below can be used to load some solr-contribs along 
        with their external dependencies.
     -->
@@ -81,11 +84,6 @@
   <lib dir="../../../contrib/velocity/lib" regex=".*\.jar" />
   <lib dir="../../../dist/" regex="solr-velocity-\d.*\.jar" />
 
-  <!-- If a 'dir' option (with or without a regex) is used and nothing
-       is found that matches, a warning will be logged.
-    -->
-  <lib dir="/non/existent/dir/yields/warning" /> 
-
   <!-- an exact 'path' can be used instead of a 'dir' to specify a 
        specific jar file.  This will cause a serious error to be logged 
        if it can't be loaded.
@@ -186,7 +184,8 @@
          maxBufferedDocs sets a limit on the number of documents buffered
          before flushing.
          If both ramBufferSizeMB and maxBufferedDocs is set, then
-         Lucene will flush based on whichever limit is hit first.  -->
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
     <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
     <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
 
@@ -306,10 +305,11 @@
          To aid in advanced debugging, Lucene provides an "InfoStream"
          of detailed information when indexing.
 
-         Setting The value to true will instruct the underlying Lucene
-         IndexWriter to write its debugging info the specified file
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
       -->
-     <!-- <infoStream file="INFOSTREAM.txt">false</infoStream> --> 
+     <infoStream>true</infoStream>
   </indexConfig>
 
 
@@ -366,7 +366,7 @@
          have some sort of hard autoCommit to limit the log size.
       -->
      <autoCommit> 
-       <maxTime>15000</maxTime> 
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime> 
        <openSearcher>false</openSearcher> 
      </autoCommit>
 
@@ -375,11 +375,10 @@
          but does not ensure that data is synced to disk.  This is
          faster and more near-realtime friendly than a hard commit.
       -->
-     <!--
-       <autoSoftCommit> 
-         <maxTime>1000</maxTime> 
-       </autoSoftCommit>
-      -->
+
+     <autoSoftCommit> 
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime> 
+     </autoSoftCommit>
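+     <!-- e.g. enable 1-second soft commits at startup with
+          java -Dsolr.autoSoftCommit.maxTime=1000 -jar start.jar -->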
 
     <!-- Update Related Event Listeners
          
diff --git a/solr/example/solr/solr.xml b/solr/example/solr/solr.xml
index d02800b..7ae7244 100644
--- a/solr/example/solr/solr.xml
+++ b/solr/example/solr/solr.xml
@@ -33,6 +33,7 @@
     <int name="hostPort">${jetty.port:8983}</int>
     <str name="hostContext">${hostContext:solr}</str>
     <int name="zkClientTimeout">${zkClientTimeout:15000}</int>
+    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
   </solrcloud>
 
   <shardHandlerFactory name="shardHandlerFactory"
diff --git a/solr/licenses/commons-configuration-1.6.jar.sha1 b/solr/licenses/commons-configuration-1.6.jar.sha1
new file mode 100644
index 0000000..1f4ad47
--- /dev/null
+++ b/solr/licenses/commons-configuration-1.6.jar.sha1
@@ -0,0 +1 @@
+32cadde23955d7681b0d94a2715846d20b425235
diff --git a/solr/licenses/commons-configuration-LICENSE-ASL.txt b/solr/licenses/commons-configuration-LICENSE-ASL.txt
new file mode 100644
index 0000000..dd726f2
--- /dev/null
+++ b/solr/licenses/commons-configuration-LICENSE-ASL.txt
@@ -0,0 +1,403 @@
+
+
+                                 Apache License
+
+                           Version 2.0, January 2004
+
+                        http://www.apache.org/licenses/
+
+
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+
+
+   1. Definitions.
+
+
+
+      "License" shall mean the terms and conditions for use, reproduction,
+
+      and distribution as defined by Sections 1 through 9 of this document.
+
+
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+
+      the copyright owner that is granting the License.
+
+
+
+      "Legal Entity" shall mean the union of the acting entity and all
+
+      other entities that control, are controlled by, or are under common
+
+      control with that entity. For the purposes of this definition,
+
+      "control" means (i) the power, direct or indirect, to cause the
+
+      direction or management of such entity, whether by contract or
+
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+
+      exercising permissions granted by this License.
+
+
+
+      "Source" form shall mean the preferred form for making modifications,
+
+      including but not limited to software source code, documentation
+
+      source, and configuration files.
+
+
+
+      "Object" form shall mean any form resulting from mechanical
+
+      transformation or translation of a Source form, including but
+
+      not limited to compiled object code, generated documentation,
+
+      and conversions to other media types.
+
+
+
+      "Work" shall mean the work of authorship, whether in Source or
+
+      Object form, made available under the License, as indicated by a
+
+      copyright notice that is included in or attached to the work
+
+      (an example is provided in the Appendix below).
+
+
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+
+      form, that is based on (or derived from) the Work and for which the
+
+      editorial revisions, annotations, elaborations, or other modifications
+
+      represent, as a whole, an original work of authorship. For the purposes
+
+      of this License, Derivative Works shall not include works that remain
+
+      separable from, or merely link (or bind by name) to the interfaces of,
+
+      the Work and Derivative Works thereof.
+
+
+
+      "Contribution" shall mean any work of authorship, including
+
+      the original version of the Work and any modifications or additions
+
+      to that Work or Derivative Works thereof, that is intentionally
+
+      submitted to Licensor for inclusion in the Work by the copyright owner
+
+      or by an individual or Legal Entity authorized to submit on behalf of
+
+      the copyright owner. For the purposes of this definition, "submitted"
+
+      means any form of electronic, verbal, or written communication sent
+
+      to the Licensor or its representatives, including but not limited to
+
+      communication on electronic mailing lists, source code control systems,
+
+      and issue tracking systems that are managed by, or on behalf of, the
+
+      Licensor for the purpose of discussing and improving the Work, but
+
+      excluding communication that is conspicuously marked or otherwise
+
+      designated in writing by the copyright owner as "Not a Contribution."
+
+
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+
+      on behalf of whom a Contribution has been received by Licensor and
+
+      subsequently incorporated within the Work.
+
+
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+
+      this License, each Contributor hereby grants to You a perpetual,
+
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+
+      copyright license to reproduce, prepare Derivative Works of,
+
+      publicly display, publicly perform, sublicense, and distribute the
+
+      Work and such Derivative Works in Source or Object form.
+
+
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+
+      this License, each Contributor hereby grants to You a perpetual,
+
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+
+      (except as stated in this section) patent license to make, have made,
+
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+
+      where such license applies only to those patent claims licensable
+
+      by such Contributor that are necessarily infringed by their
+
+      Contribution(s) alone or by combination of their Contribution(s)
+
+      with the Work to which such Contribution(s) was submitted. If You
+
+      institute patent litigation against any entity (including a
+
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+
+      or a Contribution incorporated within the Work constitutes direct
+
+      or contributory patent infringement, then any patent licenses
+
+      granted to You under this License for that Work shall terminate
+
+      as of the date such litigation is filed.
+
+
+
+   4. Redistribution. You may reproduce and distribute copies of the
+
+      Work or Derivative Works thereof in any medium, with or without
+
+      modifications, and in Source or Object form, provided that You
+
+      meet the following conditions:
+
+
+
+      (a) You must give any other recipients of the Work or
+
+          Derivative Works a copy of this License; and
+
+
+
+      (b) You must cause any modified files to carry prominent notices
+
+          stating that You changed the files; and
+
+
+
+      (c) You must retain, in the Source form of any Derivative Works
+
+          that You distribute, all copyright, patent, trademark, and
+
+          attribution notices from the Source form of the Work,
+
+          excluding those notices that do not pertain to any part of
+
+          the Derivative Works; and
+
+
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+
+          distribution, then any Derivative Works that You distribute must
+
+          include a readable copy of the attribution notices contained
+
+          within such NOTICE file, excluding those notices that do not
+
+          pertain to any part of the Derivative Works, in at least one
+
+          of the following places: within a NOTICE text file distributed
+
+          as part of the Derivative Works; within the Source form or
+
+          documentation, if provided along with the Derivative Works; or,
+
+          within a display generated by the Derivative Works, if and
+
+          wherever such third-party notices normally appear. The contents
+
+          of the NOTICE file are for informational purposes only and
+
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/solr/licenses/commons-configuration-NOTICE.txt b/solr/licenses/commons-configuration-NOTICE.txt
new file mode 100644
index 0000000..1916335
--- /dev/null
+++ b/solr/licenses/commons-configuration-NOTICE.txt
@@ -0,0 +1,5 @@
+Apache Commons Configuration
+Copyright 2001-2008 The Apache Software Foundation
+
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/concurrentlinkedhashmap-lru-1.2.jar.sha1 b/solr/licenses/concurrentlinkedhashmap-lru-1.2.jar.sha1
new file mode 100644
index 0000000..9c0fe8a
--- /dev/null
+++ b/solr/licenses/concurrentlinkedhashmap-lru-1.2.jar.sha1
@@ -0,0 +1 @@
+4316d710b6619ffe210c98deb2b0893587dad454
diff --git a/solr/licenses/concurrentlinkedhashmap-lru-LICENSE-ASL.txt b/solr/licenses/concurrentlinkedhashmap-lru-LICENSE-ASL.txt
new file mode 100644
index 0000000..dd726f2
--- /dev/null
+++ b/solr/licenses/concurrentlinkedhashmap-lru-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/solr/licenses/concurrentlinkedhashmap-lru-NOTICE.txt b/solr/licenses/concurrentlinkedhashmap-lru-NOTICE.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/solr/licenses/concurrentlinkedhashmap-lru-NOTICE.txt
diff --git a/solr/licenses/fontbox-1.7.1.jar.sha1 b/solr/licenses/fontbox-1.7.1.jar.sha1
deleted file mode 100644
index 9fab3a6..0000000
--- a/solr/licenses/fontbox-1.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6a6b6598d0e263c751fccff26ef4fdf8543888e7
diff --git a/solr/licenses/fontbox-1.8.1.jar.sha1 b/solr/licenses/fontbox-1.8.1.jar.sha1
new file mode 100644
index 0000000..7f6a9c0
--- /dev/null
+++ b/solr/licenses/fontbox-1.8.1.jar.sha1
@@ -0,0 +1 @@
+32879bb6bb87b15c6d53bc358e83ede40fc729ae
diff --git a/solr/licenses/hadoop-annotations-2.0.5-alpha.jar.sha1 b/solr/licenses/hadoop-annotations-2.0.5-alpha.jar.sha1
new file mode 100644
index 0000000..b3ae758
--- /dev/null
+++ b/solr/licenses/hadoop-annotations-2.0.5-alpha.jar.sha1
@@ -0,0 +1 @@
+64e2b38638f3b3ecf14806a12c919334ebd77ff7
diff --git a/solr/licenses/hadoop-annotations-LICENSE-ASL.txt b/solr/licenses/hadoop-annotations-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-annotations-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-annotations-NOTICE.txt b/solr/licenses/hadoop-annotations-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-annotations-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/hadoop-auth-2.0.5-alpha.jar.sha1 b/solr/licenses/hadoop-auth-2.0.5-alpha.jar.sha1
new file mode 100644
index 0000000..d53dcc4
--- /dev/null
+++ b/solr/licenses/hadoop-auth-2.0.5-alpha.jar.sha1
@@ -0,0 +1 @@
+8ca2f6521f2582bd3b95575614d6866d81e224b7
diff --git a/solr/licenses/hadoop-auth-LICENSE-ASL.txt b/solr/licenses/hadoop-auth-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-auth-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-auth-NOTICE.txt b/solr/licenses/hadoop-auth-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-auth-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/hadoop-common-2.0.0-cdh4.2.0-SNAPSHOT-tests-NOTICE.txt b/solr/licenses/hadoop-common-2.0.0-cdh4.2.0-SNAPSHOT-tests-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-common-2.0.0-cdh4.2.0-SNAPSHOT-tests-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/hadoop-common-2.0.5-alpha-tests.jar.sha1 b/solr/licenses/hadoop-common-2.0.5-alpha-tests.jar.sha1
new file mode 100644
index 0000000..c869ddf
--- /dev/null
+++ b/solr/licenses/hadoop-common-2.0.5-alpha-tests.jar.sha1
@@ -0,0 +1 @@
+58d40fdc9428d1b0eec42b951a7c7ecba5d91b1a
diff --git a/solr/licenses/hadoop-common-2.0.5-alpha.jar.sha1 b/solr/licenses/hadoop-common-2.0.5-alpha.jar.sha1
new file mode 100644
index 0000000..e85d293
--- /dev/null
+++ b/solr/licenses/hadoop-common-2.0.5-alpha.jar.sha1
@@ -0,0 +1 @@
+86250ad536d7bb46f7d7d7f25863343d140a83c2
diff --git a/solr/licenses/hadoop-common-LICENSE-ASL.txt b/solr/licenses/hadoop-common-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-common-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-common-NOTICE.txt b/solr/licenses/hadoop-common-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-common-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/hadoop-common-tests-LICENSE-ASL.txt b/solr/licenses/hadoop-common-tests-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-common-tests-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-hdfs-2.0.5-alpha-tests.jar.sha1 b/solr/licenses/hadoop-hdfs-2.0.5-alpha-tests.jar.sha1
new file mode 100644
index 0000000..258080f
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-2.0.5-alpha-tests.jar.sha1
@@ -0,0 +1 @@
+453231318386c5ad0285c189362013d085da18d8
diff --git a/solr/licenses/hadoop-hdfs-2.0.5-alpha.jar.sha1 b/solr/licenses/hadoop-hdfs-2.0.5-alpha.jar.sha1
new file mode 100644
index 0000000..d3641c8
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-2.0.5-alpha.jar.sha1
@@ -0,0 +1 @@
+ef9f0780c8a4a82f01db076c1738453d4b40d7f3
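
[Editor's note: the two .sha1 sidecar files above feed Solr's jar checksum validation, which compares each bundled jar against the digest recorded next to its license files. A minimal sketch of that check in Java follows; the jar path, file names, and class name are illustrative assumptions, not taken from this patch or from the actual build target.]

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class JarSha1Check {
  public static void main(String[] args) throws Exception {
    // Illustrative paths; the real build resolves jar locations itself.
    Path jar = Paths.get("hadoop-hdfs-2.0.5-alpha.jar");
    Path sha = Paths.get("solr/licenses/hadoop-hdfs-2.0.5-alpha.jar.sha1");
    String expected = Files.readAllLines(sha, StandardCharsets.US_ASCII).get(0).trim();

    // Stream the jar through a SHA-1 digest.
    MessageDigest md = MessageDigest.getInstance("SHA-1");
    try (InputStream in = Files.newInputStream(jar)) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) > 0; ) {
        md.update(buf, 0, n);
      }
    }

    // Render the digest as lowercase hex, matching the .sha1 file format.
    StringBuilder hex = new StringBuilder();
    for (byte b : md.digest()) {
      hex.append(String.format("%02x", b));
    }
    System.out.println(hex.toString().equals(expected) ? "checksum OK" : "checksum MISMATCH");
  }
}
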
diff --git a/solr/licenses/hadoop-hdfs-LICENSE-ASL.txt b/solr/licenses/hadoop-hdfs-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-hdfs-NOTICE.txt b/solr/licenses/hadoop-hdfs-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/hadoop-hdfs-tests-LICENSE-ASL.txt b/solr/licenses/hadoop-hdfs-tests-LICENSE-ASL.txt
new file mode 100644
index 0000000..9a8e847
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-tests-LICENSE-ASL.txt
@@ -0,0 +1,244 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses. 
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or 
+ * without modification, are permitted provided that the following 
+ * conditions are met:
+ *  - Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in 
+ *    the documentation and/or other materials provided with the distribution.
+ *  - Neither the name of the University Catholique de Louvain - UCL
+ *    nor the names of its contributors may be used to endorse or 
+ *    promote products derived from this software without specific prior 
+ *    written permission.
+ *    
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
\ No newline at end of file
diff --git a/solr/licenses/hadoop-hdfs-tests-NOTICE.txt b/solr/licenses/hadoop-hdfs-tests-NOTICE.txt
new file mode 100644
index 0000000..c56a5e4
--- /dev/null
+++ b/solr/licenses/hadoop-hdfs-tests-NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
\ No newline at end of file
diff --git a/solr/licenses/javassist-3.6.0.GA-LICENSE-MPL.txt b/solr/licenses/javassist-3.6.0.GA-LICENSE-MPL.txt
deleted file mode 100644
index 06f9651..0000000
--- a/solr/licenses/javassist-3.6.0.GA-LICENSE-MPL.txt
+++ /dev/null
@@ -1,469 +0,0 @@
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
-
-                              ---------------
-
-1. Definitions.
-
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the NPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License.
-
-     ``The contents of this file are subject to the Mozilla Public License
-     Version 1.1 (the "License"); you may not use this file except in
-     compliance with the License. You may obtain a copy of the License at
-     http://www.mozilla.org/MPL/
-
-     Software distributed under the License is distributed on an "AS IS"
-     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-     License for the specific language governing rights and limitations
-     under the License.
-
-     The Original Code is ______________________________________.
-
-     The Initial Developer of the Original Code is ________________________.
-     Portions created by ______________________ are Copyright (C) ______
-     _______________________. All Rights Reserved.
-
-     Contributor(s): ______________________________________.
-
-     Alternatively, the contents of this file may be used under the terms
-     of the _____ license (the  "[___] License"), in which case the
-     provisions of [______] License are applicable instead of those
-     above.  If you wish to allow use of your version of this file only
-     under the terms of the [____] License and not to allow others to use
-     your version of this file under the MPL, indicate your decision by
-     deleting  the provisions above and replace  them with the notice and
-     other provisions required by the [___] License.  If you do not delete
-     the provisions above, a recipient may use your version of this file
-     under either the MPL or the [___] License."
-
-     [NOTE: The text of this Exhibit A may differ slightly from the text of
-     the notices in the Source Code files of the Original Code. You should
-     use the text of this Exhibit A rather than the text found in the
-     Original Code Source Code for Your Modifications.]
diff --git a/solr/licenses/javassist-3.6.0.GA-NOTICE.txt b/solr/licenses/javassist-3.6.0.GA-NOTICE.txt
deleted file mode 100644
index d7e1a9c..0000000
--- a/solr/licenses/javassist-3.6.0.GA-NOTICE.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright notices
-
-Javassist, a Java-bytecode translator toolkit. 
-Copyright (C) 1999-2007 Shigeru Chiba. All Rights Reserved.
-
-The contents of this software, Javassist, are subject to the Mozilla Public License Version 1.1 (the "License");
-you may not use this software except in compliance with the License. You may obtain a copy of the License at 
-http://www.mozilla.org/MPL/
-
-Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF 
-ANY KIND, either express or implied. See the License for the specific language governing rights and 
-limitations under the License.
-
-The Original Code is Javassist.
-
-The Initial Developer of the Original Code is Shigeru Chiba. Portions created by the Initial Developer are
-  Copyright (C) 1999-2006 Shigeru Chiba. All Rights Reserved.
-
-Contributor(s): ______________________________________.
-
-Alternatively, the contents of this software may be used under the terms of the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), in which case the provisions of the LGPL are applicable instead of those above. If you wish to allow use of your version of this software only under the terms of the LGPL, and not to allow others to use your version of this software under the terms of the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the LGPL. If you do not delete the provisions above, a recipient may use your version of this software under the terms of either the MPL or the LGPL.
-
-If you obtain this software as part of JBoss, the contents of this software may be used under only the terms of the LGPL. To use them under the MPL, you must obtain a separate package including only Javassist but not the other part of JBoss.
-
-All the contributors to the original source tree must agree to the original license term described above.
\ No newline at end of file
diff --git a/solr/licenses/jempbox-1.7.1.jar.sha1 b/solr/licenses/jempbox-1.7.1.jar.sha1
deleted file mode 100644
index c2e17d9..0000000
--- a/solr/licenses/jempbox-1.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a94e7b5289f58486390619e87a50a3bb1d9750c3
diff --git a/solr/licenses/jempbox-1.8.1.jar.sha1 b/solr/licenses/jempbox-1.8.1.jar.sha1
new file mode 100644
index 0000000..34ebdae
--- /dev/null
+++ b/solr/licenses/jempbox-1.8.1.jar.sha1
@@ -0,0 +1 @@
+1a34de98c20293b07474aa17cb05dad66070b9c8
diff --git a/solr/licenses/jersey-core-1.16.jar.sha1 b/solr/licenses/jersey-core-1.16.jar.sha1
new file mode 100644
index 0000000..28e748d
--- /dev/null
+++ b/solr/licenses/jersey-core-1.16.jar.sha1
@@ -0,0 +1 @@
+34e9e164039913283da97af8d806ed92a931d32b
diff --git a/solr/licenses/jersey-core-LICENSE-CDDL.txt b/solr/licenses/jersey-core-LICENSE-CDDL.txt
new file mode 100644
index 0000000..542df80
--- /dev/null
+++ b/solr/licenses/jersey-core-LICENSE-CDDL.txt
@@ -0,0 +1,81 @@
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1
+
+1. Definitions.
+
+1.1. “Contributor” means each individual or entity that creates or contributes to the creation of Modifications.
+1.2. “Contributor Version” means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
+1.3. “Covered Software” means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
+1.4. “Executable” means the Covered Software in any form other than Source Code.
+1.5. “Initial Developer” means the individual or entity that first makes Original Software available under this License.
+1.6. “Larger Work” means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
+1.7. “License” means this document.
+1.8. “Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
+1.9. “Modifications” means the Source Code and Executable form of any of the following:
+A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
+B. Any new file that contains any part of the Original Software or previous Modification; or
+C. Any new file that is contributed or otherwise made available under the terms of this License.
+1.10. “Original Software” means the Source Code and Executable form of computer software code that is originally released under this License.
+1.11. “Patent Claims” means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
+1.12. “Source Code” means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
+1.13. “You” (or “Your”) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, “You” includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
+2. License Grants.
+
+2.1. The Initial Developer Grant.
+Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
+(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
+(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
+(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
+(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
+2.2. Contributor Grant.
+Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
+(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
+(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
+(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
+(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
+3. Distribution Obligations.
+
+3.1. Availability of Source Code.
+Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
+3.2. Modifications.
+The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
+3.3. Required Notices.
+You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
+3.4. Application of Additional Terms.
+You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
+3.5. Distribution of Executable Versions.
+You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
+3.6. Larger Works.
+You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
+4. Versions of the License.
+
+4.1. New Versions.
+Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
+4.2. Effect of New Versions.
+You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
+4.3. Modified Versions.
+When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
+5. DISCLAIMER OF WARRANTY.
+
+COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN “AS IS” BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+6. TERMINATION.
+
+6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
+6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as “Participant”) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
+6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.
+6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
+7. LIMITATION OF LIABILITY.
+
+UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+The Covered Software is a “commercial item,” as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of “commercial computer software” (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and “commercial computer software documentation” as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
+
+9. MISCELLANEOUS.
+
+This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
+
+10. RESPONSIBILITY FOR CLAIMS.
+
+As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
\ No newline at end of file
diff --git a/solr/licenses/jetty-6.1.26.jar.sha1 b/solr/licenses/jetty-6.1.26.jar.sha1
new file mode 100644
index 0000000..9d5e647
--- /dev/null
+++ b/solr/licenses/jetty-6.1.26.jar.sha1
@@ -0,0 +1 @@
+2f546e289fddd5b1fab1d4199fbb6e9ef43ee4b0
diff --git a/solr/licenses/jetty-util-6.1.26.jar.sha1 b/solr/licenses/jetty-util-6.1.26.jar.sha1
new file mode 100644
index 0000000..a5f4c50
--- /dev/null
+++ b/solr/licenses/jetty-util-6.1.26.jar.sha1
@@ -0,0 +1 @@
+e5642fe0399814e1687d55a3862aa5a3417226a9
diff --git a/solr/licenses/joda-time-2.2.jar.sha1 b/solr/licenses/joda-time-2.2.jar.sha1
new file mode 100644
index 0000000..5e68639
--- /dev/null
+++ b/solr/licenses/joda-time-2.2.jar.sha1
@@ -0,0 +1 @@
+a5f29a7acaddea3f4af307e8cf2d0cc82645fd7d
diff --git a/solr/licenses/joda-time-LICENSE-ASL.txt b/solr/licenses/joda-time-LICENSE-ASL.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/solr/licenses/joda-time-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/solr/licenses/joda-time-NOTICE.txt b/solr/licenses/joda-time-NOTICE.txt
new file mode 100644
index 0000000..dffbcf3
--- /dev/null
+++ b/solr/licenses/joda-time-NOTICE.txt
@@ -0,0 +1,5 @@
+=============================================================================
+= NOTICE file corresponding to section 4d of the Apache License Version 2.0 =
+=============================================================================
+This product includes software developed by
+Joda.org (http://www.joda.org/).
diff --git a/solr/licenses/morfologik-fsa-1.5.5.jar.sha1 b/solr/licenses/morfologik-fsa-1.5.5.jar.sha1
deleted file mode 100644
index 3a8935a..0000000
--- a/solr/licenses/morfologik-fsa-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7965a39db114f7c404b71d38bc7f0e6a332c4e73
diff --git a/solr/licenses/morfologik-fsa-1.6.0.jar.sha1 b/solr/licenses/morfologik-fsa-1.6.0.jar.sha1
new file mode 100644
index 0000000..8041cb4
--- /dev/null
+++ b/solr/licenses/morfologik-fsa-1.6.0.jar.sha1
@@ -0,0 +1 @@
+397a99307020797e6790f2faf8cf865983b52559
diff --git a/solr/licenses/morfologik-fsa-LICENSE-BSD.txt b/solr/licenses/morfologik-fsa-LICENSE-BSD.txt
index f97fb7d..4daba47 100644
--- a/solr/licenses/morfologik-fsa-LICENSE-BSD.txt
+++ b/solr/licenses/morfologik-fsa-LICENSE-BSD.txt
@@ -1,6 +1,6 @@
 
 Copyright (c) 2006 Dawid Weiss
-Copyright (c) 2007-2012 Dawid Weiss, Marcin Miłkowski
+Copyright (c) 2007-2013 Dawid Weiss, Marcin Miłkowski
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, 
@@ -26,4 +26,4 @@
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 
 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/solr/licenses/morfologik-polish-1.5.5.jar.sha1 b/solr/licenses/morfologik-polish-1.5.5.jar.sha1
deleted file mode 100644
index 10c14c0..0000000
--- a/solr/licenses/morfologik-polish-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4a3a9746cab8b2c99c33d2ceeda2ece3f8d8ef2
diff --git a/solr/licenses/morfologik-polish-1.6.0.jar.sha1 b/solr/licenses/morfologik-polish-1.6.0.jar.sha1
new file mode 100644
index 0000000..b44ead1
--- /dev/null
+++ b/solr/licenses/morfologik-polish-1.6.0.jar.sha1
@@ -0,0 +1 @@
+ca0663530971b54420fc1cea00a6338f68428232
diff --git a/solr/licenses/morfologik-polish-LICENSE-BSD.txt b/solr/licenses/morfologik-polish-LICENSE-BSD.txt
index 04ffd07..660f633 100644
--- a/solr/licenses/morfologik-polish-LICENSE-BSD.txt
+++ b/solr/licenses/morfologik-polish-LICENSE-BSD.txt
@@ -1,62 +1,26 @@
 BSD-licensed dictionary of Polish (Morfologik)
 
-Copyright (c) 2012, Marcin Miłkowski
+Morfologik Polish dictionary.
+Version: 2.0 PoliMorf
+Copyright (c) 2013, Marcin Miłkowski
 All rights reserved.
 
-Redistribution and  use in  source and binary  forms, with  or without
-modification, are permitted provided that the following conditions are
-met:
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
 
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
+1. Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution. 
 
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the
-   distribution.
-
-THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
-OR  IMPLIED WARRANTIES,  INCLUDING, BUT  NOT LIMITED  TO,  THE IMPLIED
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT  SHALL COPYRIGHT  HOLDERS OR  CONTRIBUTORS BE
-LIABLE FOR  ANY DIRECT,  INDIRECT, INCIDENTAL, SPECIAL,  EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES  (INCLUDING, BUT NOT LIMITED  TO, PROCUREMENT OF
-SUBSTITUTE  GOODS OR  SERVICES;  LOSS  OF USE,  DATA,  OR PROFITS;  OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF LIABILITY,
-WHETHER IN  CONTRACT, STRICT LIABILITY, OR  TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
---
-
-BSD-licensed dictionary of Polish (SGJP)
-http://sgjp.pl/morfeusz/
-
-Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, 
-	    	 Marcin Woliński, Robert Wołosz
-
-All rights reserved.
-
-Redistribution and  use in  source and binary  forms, with  or without
-modification, are permitted provided that the following conditions are
-met:
-
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the
-   distribution.
-
-THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
-OR  IMPLIED WARRANTIES,  INCLUDING, BUT  NOT LIMITED  TO,  THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.  IN NO EVENT  SHALL COPYRIGHT  HOLDERS OR  CONTRIBUTORS BE
-LIABLE FOR  ANY DIRECT,  INDIRECT, INCIDENTAL, SPECIAL,  EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES  (INCLUDING, BUT NOT LIMITED  TO, PROCUREMENT OF
-SUBSTITUTE  GOODS OR  SERVICES;  LOSS  OF USE,  DATA,  OR PROFITS;  OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED  AND ON ANY THEORY OF LIABILITY,
-WHETHER IN  CONTRACT, STRICT LIABILITY, OR  TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/solr/licenses/morfologik-polish-NOTICE.txt b/solr/licenses/morfologik-polish-NOTICE.txt
index a8a3aa1..45d4cba 100644
--- a/solr/licenses/morfologik-polish-NOTICE.txt
+++ b/solr/licenses/morfologik-polish-NOTICE.txt
@@ -1,6 +1,3 @@
 
-This product includes data from BSD-licensed dictionary of Polish (Morfologik)
-(http://morfologik.blogspot.com/)
-
-This product includes data from BSD-licensed dictionary of Polish (SGJP)
-(http://sgjp.pl/morfeusz/)
+This product includes data from BSD-licensed dictionary of Polish (Morfologik, PoliMorf)
+(http://morfologik.blogspot.com/)
\ No newline at end of file
diff --git a/solr/licenses/morfologik-stemming-1.5.5.jar.sha1 b/solr/licenses/morfologik-stemming-1.5.5.jar.sha1
deleted file mode 100644
index c9824e4..0000000
--- a/solr/licenses/morfologik-stemming-1.5.5.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e5dc913adeba3b89539cd5f82e5b88d136a1d85b
diff --git a/solr/licenses/morfologik-stemming-1.6.0.jar.sha1 b/solr/licenses/morfologik-stemming-1.6.0.jar.sha1
new file mode 100644
index 0000000..4ba5467
--- /dev/null
+++ b/solr/licenses/morfologik-stemming-1.6.0.jar.sha1
@@ -0,0 +1 @@
+8a284571bea2cdd305cd86fbac9bab6deef31c7f
diff --git a/solr/licenses/morfologik-stemming-LICENSE-BSD.txt b/solr/licenses/morfologik-stemming-LICENSE-BSD.txt
index f97fb7d..4daba47 100644
--- a/solr/licenses/morfologik-stemming-LICENSE-BSD.txt
+++ b/solr/licenses/morfologik-stemming-LICENSE-BSD.txt
@@ -1,6 +1,6 @@
 
 Copyright (c) 2006 Dawid Weiss
-Copyright (c) 2007-2012 Dawid Weiss, Marcin Miłkowski
+Copyright (c) 2007-2013 Dawid Weiss, Marcin Miłkowski
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, 
@@ -26,4 +26,4 @@
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 
 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/solr/licenses/pdfbox-1.7.1.jar.sha1 b/solr/licenses/pdfbox-1.7.1.jar.sha1
deleted file mode 100644
index be61b40..0000000
--- a/solr/licenses/pdfbox-1.7.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b762c177f881e8aa095cf515df185191a123d949
diff --git a/solr/licenses/pdfbox-1.8.1.jar.sha1 b/solr/licenses/pdfbox-1.8.1.jar.sha1
new file mode 100644
index 0000000..ea2e686
--- /dev/null
+++ b/solr/licenses/pdfbox-1.8.1.jar.sha1
@@ -0,0 +1 @@
+2127ceef9eceffe4fefe445fa8eae4609c51e145
diff --git a/solr/licenses/poi-3.8.jar.sha1 b/solr/licenses/poi-3.8.jar.sha1
deleted file mode 100644
index a811527..0000000
--- a/solr/licenses/poi-3.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-552a7703d32c57adb611df084b45f7158e8653f3
diff --git a/solr/licenses/poi-3.9.jar.sha1 b/solr/licenses/poi-3.9.jar.sha1
new file mode 100644
index 0000000..8dc4c71
--- /dev/null
+++ b/solr/licenses/poi-3.9.jar.sha1
@@ -0,0 +1 @@
+5d5e41354e88322e4bc590b31f3d2d1d52b3e6ac
diff --git a/solr/licenses/poi-ooxml-3.8.jar.sha1 b/solr/licenses/poi-ooxml-3.8.jar.sha1
deleted file mode 100644
index 9daaafa..0000000
--- a/solr/licenses/poi-ooxml-3.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-235d18adccc2b140fb3f90a2fa859b7ae29d57b8
diff --git a/solr/licenses/poi-ooxml-3.9.jar.sha1 b/solr/licenses/poi-ooxml-3.9.jar.sha1
new file mode 100644
index 0000000..3425b0e
--- /dev/null
+++ b/solr/licenses/poi-ooxml-3.9.jar.sha1
@@ -0,0 +1 @@
+bbe83c739d22eecfacd06d7e0b99ba13277040ed
diff --git a/solr/licenses/poi-ooxml-schemas-3.8.jar.sha1 b/solr/licenses/poi-ooxml-schemas-3.8.jar.sha1
deleted file mode 100644
index 9ade50e..0000000
--- a/solr/licenses/poi-ooxml-schemas-3.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cb3b26809ec65eba22143acfddf654bcf70aa009
diff --git a/solr/licenses/poi-ooxml-schemas-3.9.jar.sha1 b/solr/licenses/poi-ooxml-schemas-3.9.jar.sha1
new file mode 100644
index 0000000..94341ce
--- /dev/null
+++ b/solr/licenses/poi-ooxml-schemas-3.9.jar.sha1
@@ -0,0 +1 @@
+4c514498f0e82cccfdd3208b9caff2f45158db4a
diff --git a/solr/licenses/poi-scratchpad-3.8.jar.sha1 b/solr/licenses/poi-scratchpad-3.8.jar.sha1
deleted file mode 100644
index b952b55..0000000
--- a/solr/licenses/poi-scratchpad-3.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-33ef3eb7bd97c0dcdf2873b0e0a0938f013d410c
diff --git a/solr/licenses/poi-scratchpad-3.9.jar.sha1 b/solr/licenses/poi-scratchpad-3.9.jar.sha1
new file mode 100644
index 0000000..ee48b1f
--- /dev/null
+++ b/solr/licenses/poi-scratchpad-3.9.jar.sha1
@@ -0,0 +1 @@
+0f07d54f88a9f6c743d133be8d4446879a1e6729
diff --git a/solr/licenses/protobuf-java-2.4.0a.jar.sha1 b/solr/licenses/protobuf-java-2.4.0a.jar.sha1
new file mode 100644
index 0000000..816bf03
--- /dev/null
+++ b/solr/licenses/protobuf-java-2.4.0a.jar.sha1
@@ -0,0 +1 @@
+7ef75b63dc8797d36cca1e3c08665117cc69e52f
diff --git a/solr/licenses/protobuf-java-LICENSE-BSD.txt b/solr/licenses/protobuf-java-LICENSE-BSD.txt
new file mode 100644
index 0000000..28888af
--- /dev/null
+++ b/solr/licenses/protobuf-java-LICENSE-BSD.txt
@@ -0,0 +1,9 @@
+Copyright (c) <YEAR>, <OWNER>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/solr/licenses/protobuf-java-NOTICE.txt b/solr/licenses/protobuf-java-NOTICE.txt
new file mode 100644
index 0000000..c5fd9f3
--- /dev/null
+++ b/solr/licenses/protobuf-java-NOTICE.txt
@@ -0,0 +1,3 @@
+Protocol Buffers - Google's data interchange format
+Copyright 2008 Google Inc.
+http://code.google.com/apis/protocolbuffers/
\ No newline at end of file
diff --git a/solr/licenses/tika-core-1.3.jar.sha1 b/solr/licenses/tika-core-1.3.jar.sha1
deleted file mode 100644
index b1b31e0..0000000
--- a/solr/licenses/tika-core-1.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d660bfa2881e3ee38d3e2e2e8d54bc448e8073a
diff --git a/solr/licenses/tika-core-1.4.jar.sha1 b/solr/licenses/tika-core-1.4.jar.sha1
new file mode 100644
index 0000000..b22276e
--- /dev/null
+++ b/solr/licenses/tika-core-1.4.jar.sha1
@@ -0,0 +1 @@
+30fd5d2c4c21ed908d4b74064ac7f4f4fdbe9ae7
diff --git a/solr/licenses/tika-parsers-1.3.jar.sha1 b/solr/licenses/tika-parsers-1.3.jar.sha1
deleted file mode 100644
index a653f53..0000000
--- a/solr/licenses/tika-parsers-1.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-19bb1f65d52a41ef47583cd7deb0e4fcaf25cab9
diff --git a/solr/licenses/tika-parsers-1.4.jar.sha1 b/solr/licenses/tika-parsers-1.4.jar.sha1
new file mode 100644
index 0000000..7179010
--- /dev/null
+++ b/solr/licenses/tika-parsers-1.4.jar.sha1
@@ -0,0 +1 @@
+58aba11789dc508f004e3352948394c9e395fb50
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
index f5fd166..08a446b 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
@@ -91,6 +91,13 @@
       this.lbServer = new LBHttpSolrServer(myClient);
       this.updatesToLeaders = true;
   }
+  
+  public CloudSolrServer(String zkHost, boolean updatesToLeaders) throws MalformedURLException {
+    this.zkHost = zkHost;
+    this.myClient = HttpClientUtil.createClient(null);
+    this.lbServer = new LBHttpSolrServer(myClient);
+    this.updatesToLeaders = updatesToLeaders;
+  }
 
   /**
    * @param zkHost The client endpoint of the zookeeper quorum containing the cloud state,
@@ -124,6 +131,11 @@
     this.defaultCollection = collection;
   }
 
+  /** Gets the default collection used for requests. */
+  public String getDefaultCollection() {
+    return defaultCollection;
+  }
+
   /** Set the connect timeout to the zookeeper ensemble in ms */
   public void setZkConnectTimeout(int zkConnectTimeout) {
     this.zkConnectTimeout = zkConnectTimeout;
@@ -348,6 +360,10 @@
   public LBHttpSolrServer getLbServer() {
     return lbServer;
   }
+  
+  public boolean isUpdatesToLeaders() {
+    return updatesToLeaders;
+  }
 
   // for tests
   Map<String,List<String>> getUrlLists() {
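
Note: the new two-argument constructor plus the getDefaultCollection()/isUpdatesToLeaders() getters make the update-routing behaviour both configurable and inspectable. A minimal usage sketch, with the ZooKeeper address and collection name as placeholders:

    import org.apache.solr.client.solrj.impl.CloudSolrServer;

    public class CloudClientExample {
      public static void main(String[] args) throws Exception {
        // false = spread updates across replicas instead of routing to shard leaders
        CloudSolrServer server = new CloudSolrServer("localhost:2181", false);
        server.setDefaultCollection("collection1");

        // The new getters expose what was previously write-only state.
        System.out.println(server.getDefaultCollection()); // collection1
        System.out.println(server.isUpdatesToLeaders());   // false

        server.shutdown();
      }
    }
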
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
index 888be90..da4e448 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
@@ -658,7 +658,7 @@
    * status code that may have been returned by the remote server or a 
    * proxy along the way.
    */
-  protected static class RemoteSolrException extends SolrException {
+  public static class RemoteSolrException extends SolrException {
     /**
      * @param code Arbitrary HTTP status code
      * @param msg Exception Message
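
Note: making RemoteSolrException public lets client code catch the remote-HTTP failure type directly instead of the broader SolrException. A hedged sketch, assuming a local Solr at a placeholder URL:

    import org.apache.solr.client.solrj.impl.HttpSolrServer;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class RemoteErrorExample {
      public static void main(String[] args) throws Exception {
        HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        try {
          server.query(new ModifiableSolrParams().set("q", "*:*"));
        } catch (HttpSolrServer.RemoteSolrException e) {
          // RemoteSolrException is a RuntimeException carrying the remote HTTP status
          System.err.println("remote returned HTTP " + e.code());
        } finally {
          server.shutdown();
        }
      }
    }
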
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
index 8f7666e..14be1f3 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
@@ -16,17 +16,17 @@
  */
 package org.apache.solr.client.solrj.impl;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
 import org.apache.solr.client.solrj.StreamingResponseCallback;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
-import org.apache.solr.common.util.FastInputStream;
-import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.DataInputInputStream;
 import org.apache.solr.common.util.JavaBinCodec;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
+import org.apache.solr.common.util.NamedList;
 
 /**
  * A BinaryResponseParser that sends callback events rather then build
@@ -49,14 +49,14 @@
       JavaBinCodec codec = new JavaBinCodec() {
 
         @Override
-        public SolrDocument readSolrDocument(FastInputStream dis) throws IOException {
+        public SolrDocument readSolrDocument(DataInputInputStream dis) throws IOException {
           SolrDocument doc = super.readSolrDocument(dis);
           callback.streamSolrDocument( doc );
           return null;
         }
 
         @Override
-        public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException {
+        public SolrDocumentList readSolrDocumentList(DataInputInputStream dis) throws IOException {
           SolrDocumentList solrDocs = new SolrDocumentList();
           List list = (List) readVal(dis);
           solrDocs.setNumFound((Long) list.get(0));
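
Note: with the read methods now taking DataInputInputStream, the callback-based parser keeps working unchanged; client code still drives it through SolrServer.queryAndStreamResponse. A minimal sketch, with the server URL and field name as placeholders:

    import org.apache.solr.client.solrj.SolrServer;
    import org.apache.solr.client.solrj.StreamingResponseCallback;
    import org.apache.solr.client.solrj.impl.HttpSolrServer;
    import org.apache.solr.common.SolrDocument;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class StreamingExample {
      public static void main(String[] args) throws Exception {
        SolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        // queryAndStreamResponse uses StreamingBinaryResponseParser under the covers,
        // invoking the callback per document instead of materializing the full list.
        server.queryAndStreamResponse(params, new StreamingResponseCallback() {
          @Override
          public void streamSolrDocument(SolrDocument doc) {
            System.out.println(doc.getFieldValue("id"));
          }
          @Override
          public void streamDocListInfo(long numFound, long start, Float maxScore) {
            System.out.println("numFound=" + numFound);
          }
        });
        server.shutdown();
      }
    }
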
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
index 8c0d5d0..e21bfa6 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
@@ -16,17 +16,20 @@
  */
 package org.apache.solr.client.solrj.request;
 
-import org.apache.solr.common.SolrInputDocument;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.FastInputStream;
-import org.apache.solr.common.util.JavaBinCodec;
-import org.apache.solr.common.util.NamedList;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.DataInputInputStream;
+import org.apache.solr.common.util.JavaBinCodec;
+import org.apache.solr.common.util.NamedList;
 
 /**
  * Provides methods for marshalling an UpdateRequest to a NamedList which can be serialized in the javabin format and
@@ -94,7 +97,7 @@
       private boolean seenOuterMostDocIterator = false;
         
       @Override
-      public NamedList readNamedList(FastInputStream dis) throws IOException {
+      public NamedList readNamedList(DataInputInputStream dis) throws IOException {
         int sz = readSize(dis);
         NamedList nl = new NamedList();
         if (namedList[0] == null) {
@@ -109,7 +112,7 @@
       }
 
       @Override
-      public List readIterator(FastInputStream fis) throws IOException {
+      public List readIterator(DataInputInputStream fis) throws IOException {
 
         // default behavior for reading any regular Iterator in the stream
         if (seenOuterMostDocIterator) return super.readIterator(fis);
@@ -120,7 +123,7 @@
         return readOuterMostDocIterator(fis);
       }
 
-      private List readOuterMostDocIterator(FastInputStream fis) throws IOException {
+      private List readOuterMostDocIterator(DataInputInputStream fis) throws IOException {
         NamedList params = (NamedList) namedList[0].getVal(0);
         updateRequest.setParams(new ModifiableSolrParams(SolrParams.toSolrParams(params)));
         if (handler == null) return super.readIterator(fis);
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
index 7e79bce..123b0f2 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
@@ -165,15 +165,18 @@
     return Collections.unmodifiableSet(liveNodes);
   }
 
-  /**
-   * Get the slice/shardId for a core.
-   * @param coreNodeName in the form of nodeName_coreName (the name of the replica)
-   */
-  public String getShardId(String coreNodeName) {
-    //  System.out.println("###### getShardId("+coreNodeName+") in " + collectionStates);
+  public String getShardId(String baseUrl, String coreName) {
+    // System.out.println("###### getShardId(" + baseUrl + "," + coreName + ") in " + collectionStates);
     for (DocCollection coll : collectionStates.values()) {
       for (Slice slice : coll.getSlices()) {
-        if (slice.getReplicasMap().containsKey(coreNodeName)) return slice.getName();
+        for (Replica replica : slice.getReplicas()) {
+          // TODO: for really large clusters, we could 'index' on this
+          String rbaseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
+          String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
+          if (baseUrl.equals(rbaseUrl) && coreName.equals(rcore)) {
+            return slice.getName();
+          }
+        }
       }
     }
     return null;
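
Note: the lookup now keys on the replica's base URL and core name rather than the coreNodeName. A hedged caller-side sketch; the argument values are placeholders, and a real caller would pull them from the ZkStateReader properties exactly as the implementation above does:

    import org.apache.solr.common.cloud.ClusterState;

    public final class ShardIdLookup {
      // Returns the slice (shard) name owning the replica, or null when no
      // replica matches the (baseUrl, coreName) pair.
      public static String shardOf(ClusterState state, String baseUrl, String coreName) {
        return state.getShardId(baseUrl, coreName);
      }
    }
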
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
index 8e5b093..1f8c40b 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
@@ -95,7 +95,7 @@
                 // if keeper does not replace oldKeeper we must be sure to close it
                 synchronized (connectionUpdateLock) {
                   try {
-                    waitForConnected(SolrZkClient.DEFAULT_CLIENT_CONNECT_TIMEOUT);
+                    waitForConnected(Long.MAX_VALUE);
                   } catch (Exception e1) {
                     closeKeeper(keeper);
                     throw new RuntimeException(e1);
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 6365eef..356f14b 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -508,6 +508,7 @@
   
   public List<ZkCoreNodeProps> getReplicaProps(String collection,
       String shardId, String thisCoreNodeName, String coreName, String mustMatchStateFilter, String mustNotMatchStateFilter) {
+    assert thisCoreNodeName != null;
     ClusterState clusterState = this.clusterState;
     if (clusterState == null) {
       return null;
@@ -540,7 +541,7 @@
       }
     }
     if (nodes.size() == 0) {
-      // no replicas - go local
+      // no replicas
       return null;
     }
 
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/DataInputInputStream.java b/solr/solrj/src/java/org/apache/solr/common/util/DataInputInputStream.java
new file mode 100644
index 0000000..d412f40
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/common/util/DataInputInputStream.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.common.util;
+
+import java.io.DataInput;
+import java.io.InputStream;
+
+/**
+ * An abstract DataInput that extends InputStream
+ */
+public abstract class DataInputInputStream extends InputStream implements DataInput {
+}
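
Note: since FastInputStream (next file) now extends this class, every JavaBinCodec read method can accept either the buffered stream or any other DataInput-capable InputStream. A minimal sketch of the substitution:

    import java.io.ByteArrayInputStream;

    import org.apache.solr.common.util.DataInputInputStream;
    import org.apache.solr.common.util.FastInputStream;

    public class WrapExample {
      public static void main(String[] args) throws Exception {
        byte[] payload = {0x01, 0x02, 0x03};
        // FastInputStream is-a DataInputInputStream, so codec read methods take it directly.
        DataInputInputStream dis = new FastInputStream(new ByteArrayInputStream(payload));
        System.out.println(dis.readByte()); // 1
      }
    }
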
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/FastInputStream.java b/solr/solrj/src/java/org/apache/solr/common/util/FastInputStream.java
index 0c463e4..8a2ecee 100755
--- a/solr/solrj/src/java/org/apache/solr/common/util/FastInputStream.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/FastInputStream.java
@@ -22,7 +22,7 @@
 /** Single threaded buffered InputStream
  *  Internal Solr use only, subject to change.
  */
-public class FastInputStream extends InputStream implements DataInput {
+public class FastInputStream extends DataInputInputStream {
   protected final InputStream in;
   protected final byte[] buf;
   protected int pos;
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java b/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
index 807523f..d65bdc3 100755
--- a/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
@@ -113,7 +113,7 @@
   }
 
 
-  public SimpleOrderedMap<Object> readOrderedMap(FastInputStream dis) throws IOException {
+  public SimpleOrderedMap<Object> readOrderedMap(DataInputInputStream dis) throws IOException {
     int sz = readSize(dis);
     SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
     for (int i = 0; i < sz; i++) {
@@ -124,7 +124,7 @@
     return nl;
   }
 
-  public NamedList<Object> readNamedList(FastInputStream dis) throws IOException {
+  public NamedList<Object> readNamedList(DataInputInputStream dis) throws IOException {
     int sz = readSize(dis);
     NamedList<Object> nl = new NamedList<Object>();
     for (int i = 0; i < sz; i++) {
@@ -164,7 +164,7 @@
 
   protected byte tagByte;
 
-  public Object readVal(FastInputStream dis) throws IOException {
+  public Object readVal(DataInputInputStream dis) throws IOException {
     tagByte = dis.readByte();
 
     // if ((tagByte & 0xe0) == 0) {
@@ -304,7 +304,7 @@
     daos.write(arr, offset, len);
   }
 
-  public byte[] readByteArray(FastInputStream dis) throws IOException {
+  public byte[] readByteArray(DataInputInputStream dis) throws IOException {
     byte[] arr = new byte[readVInt(dis)];
     dis.readFully(arr);
     return arr;
@@ -321,7 +321,7 @@
     }
   }
 
-  public SolrDocument readSolrDocument(FastInputStream dis) throws IOException {
+  public SolrDocument readSolrDocument(DataInputInputStream dis) throws IOException {
     NamedList nl = (NamedList) readVal(dis);
     SolrDocument doc = new SolrDocument();
     for (int i = 0; i < nl.size(); i++) {
@@ -332,7 +332,7 @@
     return doc;
   }
 
-  public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException {
+  public SolrDocumentList readSolrDocumentList(DataInputInputStream dis) throws IOException {
     SolrDocumentList solrDocs = new SolrDocumentList();
     List list = (List) readVal(dis);
     solrDocs.setNumFound((Long) list.get(0));
@@ -356,7 +356,7 @@
     writeArray(docs);
   }
 
-  public SolrInputDocument readSolrInputDocument(FastInputStream dis) throws IOException {
+  public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) throws IOException {
     int sz = readVInt(dis);
     float docBoost = (Float)readVal(dis);
     SolrInputDocument sdoc = new SolrInputDocument();
@@ -390,7 +390,7 @@
   }
 
 
-  public Map<Object,Object> readMap(FastInputStream dis)
+  public Map<Object,Object> readMap(DataInputInputStream dis)
           throws IOException {
     int sz = readVInt(dis);
     Map<Object,Object> m = new LinkedHashMap<Object,Object>();
@@ -411,7 +411,7 @@
     writeVal(END_OBJ);
   }
 
-  public List<Object> readIterator(FastInputStream fis) throws IOException {
+  public List<Object> readIterator(DataInputInputStream fis) throws IOException {
     ArrayList<Object> l = new ArrayList<Object>();
     while (true) {
       Object o = readVal(fis);
@@ -444,7 +444,7 @@
     }
   }
 
-  public List<Object> readArray(FastInputStream dis) throws IOException {
+  public List<Object> readArray(DataInputInputStream dis) throws IOException {
     int sz = readSize(dis);
     ArrayList<Object> l = new ArrayList<Object>(sz);
     for (int i = 0; i < sz; i++) {
@@ -473,7 +473,7 @@
   byte[] bytes;
   CharArr arr = new CharArr();
 
-  public String readStr(FastInputStream dis) throws IOException {
+  public String readStr(DataInputInputStream dis) throws IOException {
     int sz = readSize(dis);
     if (bytes == null || bytes.length < sz) bytes = new byte[sz];
     dis.readFully(bytes, 0, sz);
@@ -501,7 +501,7 @@
     }
   }
 
-  public int readSmallInt(FastInputStream dis) throws IOException {
+  public int readSmallInt(DataInputInputStream dis) throws IOException {
     int v = tagByte & 0x0F;
     if ((tagByte & 0x10) != 0)
       v = (readVInt(dis) << 4) | v;
@@ -525,7 +525,7 @@
     }
   }
 
-  public long readSmallLong(FastInputStream dis) throws IOException {
+  public long readSmallLong(DataInputInputStream dis) throws IOException {
     long v = tagByte & 0x0F;
     if ((tagByte & 0x10) != 0)
       v = (readVLong(dis) << 4) | v;
@@ -607,7 +607,7 @@
   }
 
 
-  public int readSize(FastInputStream in) throws IOException {
+  public int readSize(DataInputInputStream in) throws IOException {
     int sz = tagByte & 0x1f;
     if (sz == 0x1f) sz += readVInt(in);
     return sz;
@@ -634,7 +634,7 @@
    *
    * @throws IOException If there is a low-level I/O error.
    */
-  public static int readVInt(FastInputStream in) throws IOException {
+  public static int readVInt(DataInputInputStream in) throws IOException {
     byte b = in.readByte();
     int i = b & 0x7F;
     for (int shift = 7; (b & 0x80) != 0; shift += 7) {
@@ -653,7 +653,7 @@
     out.writeByte((byte) i);
   }
 
-  public static long readVLong(FastInputStream in) throws IOException {
+  public static long readVLong(DataInputInputStream in) throws IOException {
     byte b = in.readByte();
     long i = b & 0x7F;
     for (int shift = 7; (b & 0x80) != 0; shift += 7) {
@@ -683,7 +683,7 @@
 
   }
 
-  public String readExternString(FastInputStream fis) throws IOException {
+  public String readExternString(DataInputInputStream fis) throws IOException {
     int idx = readSize(fis);
     if (idx != 0) {// idx != 0 is the index of the extern string
       return stringsList.get(idx - 1);
diff --git a/solr/solrj/src/test-files/solrj/log4j.properties b/solr/solrj/src/test-files/solrj/log4j.properties
index fbc817f..9b74a5f 100644
--- a/solr/solrj/src/test-files/solrj/log4j.properties
+++ b/solr/solrj/src/test-files/solrj/log4j.properties
@@ -7,3 +7,4 @@
 log4j.appender.CONSOLE.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
 
 log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml
index 76db378..99bec04 100644
--- a/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml
+++ b/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig-slave1.xml
@@ -19,11 +19,12 @@
 
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
   <dataDir>${solr.data.dir:}</dataDir>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
 
-  <!-- <indexConfig> section could go here, but we want the defaults -->
-
   <updateHandler class="solr.DirectUpdateHandler2">
   </updateHandler>
 
diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig.xml
index f1a31de..7f683e5 100644
--- a/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig.xml
+++ b/solr/solrj/src/test-files/solrj/solr/collection1/conf/solrconfig.xml
@@ -22,6 +22,9 @@
 -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
    <dataDir>${solr.data.dir:}</dataDir>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
 
diff --git a/solr/solrj/src/test-files/solrj/solr/shared/conf/solrconfig.xml b/solr/solrj/src/test-files/solrj/solr/shared/conf/solrconfig.xml
index 8cda0e6..31d8f1d 100644
--- a/solr/solrj/src/test-files/solrj/solr/shared/conf/solrconfig.xml
+++ b/solr/solrj/src/test-files/solrj/solr/shared/conf/solrconfig.xml
@@ -22,6 +22,9 @@
 -->
 <config>
   <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+  <indexConfig>
+    <useCompoundFile>${useCompoundFile:false}</useCompoundFile>
+  </indexConfig>
   <dataDir>${tempDir}/data/${l10n:}-${version:}</dataDir>
   <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
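
Note: all three test solrconfig files gain the same indexConfig block, so compound-file usage is controlled by one system property defaulting to false. A trivial sketch of flipping it before core load; the property name comes from the ${useCompoundFile:false} substitution above:

    public class CompoundFileToggle {
      public static void main(String[] args) {
        // Resolved by ${useCompoundFile:false} in the test solrconfig.xml at core load.
        System.setProperty("useCompoundFile", "true");
        System.out.println(System.getProperty("useCompoundFile")); // true
      }
    }
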
 
diff --git a/solr/solrj/src/test-files/solrj/solr/shared/solr.xml b/solr/solrj/src/test-files/solrj/solr/shared/solr.xml
index 48e091b..55c18eb 100644
--- a/solr/solrj/src/test-files/solrj/solr/shared/solr.xml
+++ b/solr/solrj/src/test-files/solrj/solr/shared/solr.xml
@@ -30,7 +30,7 @@
   adminPath: RequestHandler path to manage cores.  
     If 'null' (or absent), cores will not be manageable via REST
   -->
-  <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000">
+  <cores adminPath="/admin/cores" defaultCoreName="core0" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" genericCoreNodeNames="${genericCoreNodeNames:true}">
     <core name="collection1" instanceDir="." />
     <core name="core0" instanceDir="${theInstanceDir:./}" dataDir="${dataDir1}" collection="${collection:acollection}">
       <property name="version" value="3.5"/>
diff --git a/solr/solrj/src/test-files/solrj/solr/solr.xml b/solr/solrj/src/test-files/solrj/solr/solr.xml
index 44d0f78..73d1111 100644
--- a/solr/solrj/src/test-files/solrj/solr/solr.xml
+++ b/solr/solrj/src/test-files/solrj/solr/solr.xml
@@ -28,7 +28,7 @@
   adminPath: RequestHandler path to manage cores.  
     If 'null' (or absent), cores will not be manageable via request handler
   -->
-  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" numShards="${numShards:3}">
+  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}" hostContext="${hostContext:solr}" zkClientTimeout="8000" genericCoreNodeNames="${genericCoreNodeNames:true}">
     <core name="collection1" instanceDir="collection1" shard="${shard:}" collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"/>
   </cores>
 </solr>
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java
index 55feeae..c16f117 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/MergeIndexesExampleTestBase.java
@@ -17,10 +17,6 @@
 
 package org.apache.solr.client.solrj;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
@@ -32,9 +28,12 @@
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.util.ExternalPaths;
-import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
 /**
  * Abstract base class for testing merge indexes command
  *
@@ -42,7 +41,8 @@
  *
  */
 public abstract class MergeIndexesExampleTestBase extends SolrExampleTestBase {
-  protected static CoreContainer cores;
+
+  protected CoreContainer cores;
   private String saveProp;
   private File dataDir2;
 
@@ -56,13 +56,12 @@
     if (dataDir == null) {
       createTempDir();
     }
-    cores = new CoreContainer();
   }
-  
-  @AfterClass
-  public static void afterClass() {
-    cores.shutdown();
-    cores = null;
+
+  protected void setupCoreContainer() {
+    cores = new CoreContainer(getSolrHome());
+    cores.load();
+    //cores = CoreContainer.createAndLoad(getSolrHome(), new File(TEMP_DIR, "solr.xml"));
   }
   
   @Override
@@ -71,17 +70,19 @@
     System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
     super.setUp();
 
-    SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
-    cores.setPersistent(false);
-    
     // setup datadirs
-    System.setProperty( "solr.core0.data.dir", SolrTestCaseJ4.dataDir.getCanonicalPath() ); 
-    
+    System.setProperty( "solr.core0.data.dir", SolrTestCaseJ4.dataDir.getCanonicalPath() );
+
     dataDir2 = new File(TEMP_DIR, getClass().getName() + "-"
         + System.currentTimeMillis());
     dataDir2.mkdirs();
-    
-    System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() ); 
+
+    System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() );
+
+    setupCoreContainer();
+    SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
+    cores.setPersistent(false);
+
   }
 
   @Override
@@ -96,6 +97,8 @@
         System.err.println("!!!! WARNING: best effort to remove " + dataDir2.getAbsolutePath() + " FAILED !!!!!");
       }
     }
+
+    cores.shutdown();
     
     if (saveProp == null) System.clearProperty("solr.directoryFactory");
     else System.setProperty("solr.directoryFactory", saveProp);
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java
index 48fe1d9..15f2128 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/MultiCoreExampleTestBase.java
@@ -17,8 +17,6 @@
 
 package org.apache.solr.client.solrj;
 
-import java.io.File;
-
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
 import org.apache.solr.client.solrj.request.CoreAdminRequest;
 import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
@@ -30,10 +28,10 @@
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.util.ExternalPaths;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.File;
+
 
 /**
  *
@@ -41,30 +39,21 @@
  */
 public abstract class MultiCoreExampleTestBase extends SolrExampleTestBase 
 {
-  protected static CoreContainer cores;
+  protected CoreContainer cores;
 
   private File dataDir2;
   private File dataDir1;
 
   @Override public String getSolrHome() { return ExternalPaths.EXAMPLE_MULTICORE_HOME; }
 
-  
-  @BeforeClass
-  public static void beforeThisClass2() throws Exception {
+  protected void setupCoreContainer() {
     cores = new CoreContainer();
-  }
-  
-  @AfterClass
-  public static void afterClass() {
-    cores.shutdown();
+    cores.load();
   }
   
   @Override public void setUp() throws Exception {
     super.setUp();
 
-    SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
-    cores.setPersistent(false);
-    
     dataDir1 = new File(TEMP_DIR, getClass().getName() + "-core0-"
         + System.currentTimeMillis());
     dataDir1.mkdirs();
@@ -74,7 +63,14 @@
     dataDir2.mkdirs();
     
     System.setProperty( "solr.core0.data.dir", this.dataDir1.getCanonicalPath() ); 
-    System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() ); 
+    System.setProperty( "solr.core1.data.dir", this.dataDir2.getCanonicalPath() );
+
+    setupCoreContainer();
+
+    SolrCore.log.info("CORES=" + cores + " : " + cores.getCoreNames());
+    cores.setPersistent(false);
+
+
   }
   
   @Override
@@ -89,6 +85,8 @@
         System.err.println("!!!! WARNING: best effort to remove " + dataDir2.getAbsolutePath() + " FAILED !!!!!");
       }
     }
+
+    cores.shutdown();
   }
 
   @Override
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/AbstractEmbeddedSolrServerTestCase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/AbstractEmbeddedSolrServerTestCase.java
index 2b7a18b..dfa2e9a 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/AbstractEmbeddedSolrServerTestCase.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/AbstractEmbeddedSolrServerTestCase.java
@@ -51,6 +51,7 @@
     super.setUp();
 
     System.setProperty("solr.solr.home", SOLR_HOME.getAbsolutePath());
+    System.out.println("Solr home: " + SOLR_HOME.getAbsolutePath());
 
     //The index is always stored within a temporary directory
     createTempDir();
@@ -61,7 +62,8 @@
     System.setProperty("dataDir2", dataDir2.getAbsolutePath());
     System.setProperty("tempDir", tempDir.getAbsolutePath());
     System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong()));
-    cores = new CoreContainer(SOLR_HOME.getAbsolutePath(), getSolrXml());
+    cores = CoreContainer.createAndLoad(SOLR_HOME.getAbsolutePath(), getSolrXml());
+    cores.setPersistent(false);
   }
   
   protected abstract File getSolrXml() throws Exception;
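
Note: the construct-then-load dance is replaced by the createAndLoad factory, which the test cases in this patch now use. A hedged standalone sketch, with the home path as a placeholder:

    import java.io.File;

    import org.apache.solr.core.CoreContainer;

    public class CoreContainerExample {
      public static void main(String[] args) {
        // createAndLoad both constructs the container and loads the cores
        // described in solr.xml, replacing new CoreContainer(...) plus load().
        File home = new File("/path/to/solr/home"); // placeholder path
        CoreContainer cores = CoreContainer.createAndLoad(
            home.getAbsolutePath(), new File(home, "solr.xml"));
        try {
          System.out.println(cores.getCoreNames());
        } finally {
          cores.shutdown();
        }
      }
    }
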
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MergeIndexesEmbeddedTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MergeIndexesEmbeddedTest.java
index 52fc425..75239ab 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MergeIndexesEmbeddedTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MergeIndexesEmbeddedTest.java
@@ -17,8 +17,6 @@
 
 package org.apache.solr.client.solrj.embedded;
 
-import java.io.File;
-
 import org.apache.solr.client.solrj.MergeIndexesExampleTestBase;
 import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.core.SolrCore;
@@ -36,10 +34,6 @@
     // TODO: fix this test to use MockDirectoryFactory
     System.clearProperty("solr.directoryFactory");
     super.setUp();
-
-    File home = new File(getSolrHome());
-    File f = new File(home, "solr.xml");
-    cores.load(getSolrHome(), f);
   }
 
   @Override
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreEmbeddedTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreEmbeddedTest.java
index c3a1fb7..427a10d 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreEmbeddedTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/MultiCoreEmbeddedTest.java
@@ -17,8 +17,6 @@
 
 package org.apache.solr.client.solrj.embedded;
 
-import java.io.File;
-
 import org.apache.solr.client.solrj.MultiCoreExampleTestBase;
 import org.apache.solr.client.solrj.SolrServer;
 
@@ -35,10 +33,6 @@
     // TODO: fix this test to use MockDirectoryFactory
     System.clearProperty("solr.directoryFactory");
     super.setUp();
-    
-    File home = new File( getSolrHome() );
-    File f = new File( home, "solr.xml" );
-    cores.load( getSolrHome(), f );
   }
 
   @Override
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
index 71a0716..ede1660 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
@@ -17,20 +17,16 @@
 
 package org.apache.solr.client.solrj.embedded;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.InputStreamReader;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.xpath.*;
-
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import com.google.common.io.ByteStreams;
 import org.apache.commons.io.IOUtils;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
-import org.apache.solr.client.solrj.request.*;
+import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.CoreAdminResponse;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.core.CoreContainer;
@@ -46,7 +42,17 @@
 import org.w3c.dom.Document;
 import org.w3c.dom.Node;
 
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
 
 /**
  *
@@ -75,6 +81,17 @@
   public void tearDown() throws Exception {
    super.tearDown(); 
   }
+
+  private static void dumpFile(File fileToDump) throws IOException {
+    System.out.println("Dumping " + fileToDump.getAbsolutePath());
+    InputStream is = new FileInputStream(fileToDump);
+    try {
+      ByteStreams.copy(is, System.out);
+    }
+    finally {
+      IOUtils.closeQuietly(is);
+    }
+  }
   
   @Override
   protected File getSolrXml() throws Exception {
@@ -107,6 +124,10 @@
 
   @Test
   public void testProperties() throws Exception {
+
+    String persistedSolrXml = new File(tempDir, SOLR_PERSIST_XML).getAbsolutePath();
+    log.info("persistedSolrXml: {}", persistedSolrXml);
+
     UpdateRequest up = new UpdateRequest();
     up.setAction(ACTION.COMMIT, true, true);
     up.deleteByQuery("*:*");
@@ -176,14 +197,14 @@
     long after = mcr.getStartTime(name).getTime();
     assertTrue("should have more recent time: " + after + "," + before, after > before);
 
-    mcr = CoreAdminRequest.persist(SOLR_PERSIST_XML, coreadmin);
+    mcr = CoreAdminRequest.persist(persistedSolrXml, coreadmin);
 
     DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
-    FileInputStream fis = new FileInputStream(new File(tempDir, SOLR_PERSIST_XML));
+    FileInputStream fis = new FileInputStream(new File(persistedSolrXml));
     try {
       Document document = builder.parse(fis);
       fis.close();
-      fis = new FileInputStream(new File(tempDir, SOLR_PERSIST_XML));
+      fis = new FileInputStream(new File(persistedSolrXml));
       String solrPersistXml = IOUtils.toString(new InputStreamReader(fis, "UTF-8"));
       //System.out.println("xml:" + solrPersistXml);
       assertTrue("\"/solr/cores[@defaultCoreName='core0']\" doesn't match in:\n" + solrPersistXml,
@@ -196,21 +217,22 @@
                  exists("/solr/cores[@zkClientTimeout='8000']", document));
       assertTrue("\"/solr/cores[@hostContext='${hostContext:solr}']\" doesn't match in:\n" + solrPersistXml,
                  exists("/solr/cores[@hostContext='${hostContext:solr}']", document));
-      
+      assertTrue("\"/solr/cores[@genericCoreNodeNames='${genericCoreNodeNames:true}']\" doesn't match in:\n" + solrPersistXml,
+          exists("/solr/cores[@genericCoreNodeNames='${genericCoreNodeNames:true}']", document));
     } finally {
       fis.close();
     }
     
     CoreAdminRequest.renameCore(name, "renamed_core", coreadmin);
     
-    mcr = CoreAdminRequest.persist(SOLR_PERSIST_XML, getRenamedSolrAdmin());
+    mcr = CoreAdminRequest.persist(persistedSolrXml, getRenamedSolrAdmin());
     
 //    fis = new FileInputStream(new File(tempDir, SOLR_PERSIST_XML));
 //    String solrPersistXml = IOUtils.toString(fis);
 //    System.out.println("xml:" + solrPersistXml);
 //    fis.close();
     
-    fis = new FileInputStream(new File(tempDir, SOLR_PERSIST_XML));
+    fis = new FileInputStream(new File(persistedSolrXml));
     try {
       Document document = builder.parse(fis);
       assertTrue(exists("/solr/cores/core[@name='renamed_core']", document));
@@ -234,17 +256,17 @@
 //    System.out.println("xml:" + solrPersistXml);
 //    fis.close();
     
-    mcr = CoreAdminRequest.persist(SOLR_PERSIST_XML, getRenamedSolrAdmin());
+    mcr = CoreAdminRequest.persist(persistedSolrXml, getRenamedSolrAdmin());
     
 //    fis = new FileInputStream(new File(solrXml.getParent(), SOLR_PERSIST_XML));
 //    solrPersistXml = IOUtils.toString(fis);
 //    System.out.println("xml:" + solrPersistXml);
 //    fis.close();
     
-    fis = new FileInputStream(new File(tempDir, SOLR_PERSIST_XML));
+    fis = new FileInputStream(new File(persistedSolrXml));
     try {
       Document document = builder.parse(fis);
-      assertTrue(exists("/solr/cores/core[@name='collection1' and (@instanceDir='./' or @instanceDir='.\\')]", document));
+      assertTrue(exists("/solr/cores/core[@name='collection1' and @instanceDir='.']", document));
     } finally {
       fis.close();
     }
@@ -258,9 +280,9 @@
 //   System.out.println("xml:" + solrPersistXml);
 //   fis.close();
     
-    cores = new CoreContainer(SOLR_HOME.getAbsolutePath(), new File(tempDir, SOLR_PERSIST_XML));
+    cores = CoreContainer.createAndLoad(SOLR_HOME.getAbsolutePath(), new File(persistedSolrXml));
  
-    mcr = CoreAdminRequest.persist(SOLR_PERSIST_XML, getRenamedSolrAdmin());
+    //mcr = CoreAdminRequest.persist(SOLR_PERSIST_XML, getRenamedSolrAdmin());
     
 //     fis = new FileInputStream(new File(solrXml.getParent(),
 //     SOLR_PERSIST_XML));
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java
index 9f2af65..7ff8ed9 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java
@@ -495,6 +495,22 @@
     client.getConnectionManager().shutdown();
   }
 
+  /**
+   * A trivial test that verifies the example keystore used for SSL testing can be 
+   * found using the base class. This helps future-proof against the possibility of 
+   * something moving/breaking the keystore path in a way that results in the SSL 
+   * randomization logic being forced to silently never use SSL.  (We can't enforce 
+   * this type of check in the base class because then it would not be usable by client 
+   * code depending on the test framework.)
+   */
+  public void testExampleKeystorePath() {
+    assertNotNull("Example keystore is null, meaning that something has changed in the " +
+                  "structure of the example configs and/or ExternalPaths.java - " + 
+                  "SSL randomization is broken",
+                  getExampleKeystoreFile());
+  }
+
+
   private int findUnusedPort() {
     for (int port = 0; port < 65535; port++) {
       Socket s = new Socket();
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrServerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrServerTest.java
index c6cb68f..3f99eaa 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrServerTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrServerTest.java
@@ -102,35 +102,39 @@
 
     indexr(id, 0, "a_t", "to come to the aid of their country.");
     
-    // compare leaders list
     CloudJettyRunner shard1Leader = shardToLeaderJetty.get("shard1");
     CloudJettyRunner shard2Leader = shardToLeaderJetty.get("shard2");
-    assertEquals(2, cloudClient.getLeaderUrlLists().get("collection1").size());
-    HashSet<String> leaderUrlSet = new HashSet<String>();
-    leaderUrlSet.addAll(cloudClient.getLeaderUrlLists().get("collection1"));
-    assertTrue("fail check for leader:" + shard1Leader.url + " in "
-        + leaderUrlSet, leaderUrlSet.contains(shard1Leader.url + "/"));
-    assertTrue("fail check for leader:" + shard2Leader.url + " in "
-        + leaderUrlSet, leaderUrlSet.contains(shard2Leader.url + "/"));
-
-    // compare replicas list
-    Set<String> replicas = new HashSet<String>();
-    List<CloudJettyRunner> jetties = shardToJetty.get("shard1");
-    for (CloudJettyRunner cjetty : jetties) {
-      replicas.add(cjetty.url);
-    }
-    jetties = shardToJetty.get("shard2");
-    for (CloudJettyRunner cjetty : jetties) {
-      replicas.add(cjetty.url);
-    }
-    replicas.remove(shard1Leader.url);
-    replicas.remove(shard2Leader.url);
     
-    assertEquals(replicas.size(), cloudClient.getReplicasLists().get("collection1").size());
-    
-    for (String url : cloudClient.getReplicasLists().get("collection1")) {
-      assertTrue("fail check for replica:" + url + " in " + replicas,
-          replicas.contains(stripTrailingSlash(url)));
+    if (cloudClient.isUpdatesToLeaders()) {
+      // compare leaders list
+      assertEquals(2, cloudClient.getLeaderUrlLists().get("collection1").size());
+      HashSet<String> leaderUrlSet = new HashSet<String>();
+      leaderUrlSet.addAll(cloudClient.getLeaderUrlLists().get("collection1"));
+      assertTrue("fail check for leader:" + shard1Leader.url + " in "
+          + leaderUrlSet, leaderUrlSet.contains(shard1Leader.url + "/"));
+      assertTrue("fail check for leader:" + shard2Leader.url + " in "
+          + leaderUrlSet, leaderUrlSet.contains(shard2Leader.url + "/"));
+      
+      // compare replicas list
+      Set<String> replicas = new HashSet<String>();
+      List<CloudJettyRunner> jetties = shardToJetty.get("shard1");
+      for (CloudJettyRunner cjetty : jetties) {
+        replicas.add(cjetty.url);
+      }
+      jetties = shardToJetty.get("shard2");
+      for (CloudJettyRunner cjetty : jetties) {
+        replicas.add(cjetty.url);
+      }
+      replicas.remove(shard1Leader.url);
+      replicas.remove(shard2Leader.url);
+      
+      assertEquals(replicas.size(),
+          cloudClient.getReplicasLists().get("collection1").size());
+      
+      for (String url : cloudClient.getReplicasLists().get("collection1")) {
+        assertTrue("fail check for replica:" + url + " in " + replicas,
+            replicas.contains(stripTrailingSlash(url)));
+      }
     }
     
   }
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java
index e432bdc..681e5f9b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestCoreAdmin.java
@@ -17,8 +17,8 @@
 
 package org.apache.solr.client.solrj.request;
 
-import java.io.File;
-
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.commons.io.FileUtils;
 import org.apache.solr.SolrIgnoredThreadsFilter;
 import org.apache.solr.client.solrj.SolrServer;
 import org.apache.solr.client.solrj.embedded.AbstractEmbeddedSolrServerTestCase;
@@ -27,14 +27,13 @@
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrCore;
-import org.apache.commons.io.FileUtils;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import java.io.File;
 
 @ThreadLeakFilters(defaultFilters = true, filters = {SolrIgnoredThreadsFilter.class})
 public class TestCoreAdmin extends AbstractEmbeddedSolrServerTestCase {
@@ -67,6 +66,8 @@
     
     File tmp = new File(TEMP_DIR, "solrtest-" + getTestClass().getSimpleName() + "-" + System.currentTimeMillis());
     tmp.mkdirs();
+
+    log.info("Creating cores underneath {}", tmp);
     
     File dataDir = new File(tmp, this.getTestName()
         + System.currentTimeMillis() + "-" + "data");
@@ -103,7 +104,7 @@
 
     File logDir;
     try {
-      logDir = core.getUpdateHandler().getUpdateLog().getLogDir();
+      logDir = new File(core.getUpdateHandler().getUpdateLog().getLogDir());
     } finally {
       coreProveIt.close();
       core.close();
diff --git a/solr/test-framework/ivy.xml b/solr/test-framework/ivy.xml
index 3de0822..461d1cd 100644
--- a/solr/test-framework/ivy.xml
+++ b/solr/test-framework/ivy.xml
@@ -16,7 +16,10 @@
    specific language governing permissions and limitations
    under the License.    
 -->
-<ivy-module version="2.0">
+<!DOCTYPE ivy-module [
+  <!ENTITY hadoop.version "2.0.5-alpha">
+]>
+<ivy-module version="2.0" xmlns:m="http://ant.apache.org/ivy/maven">
     <info organisation="org.apache.solr" module="solr-test-framework"/>
 
     <configurations>
@@ -31,10 +34,24 @@
     <dependencies defaultconf="default">
       <dependency org="org.apache.ant" name="ant" rev="1.8.2" transitive="false" />
 
-      <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*">
+        <exclude org="org.hamcrest" module="hamcrest-core"/>
+      </dependency>
       <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
       <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
 
+      <!-- Hadoop DfsMiniCluster Dependencies -->
+      <dependency org="org.apache.hadoop" name="hadoop-common" transitive="false" rev="&hadoop.version;" conf="default->*;junit4-stdalone->*">
+        <artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests" />
+      </dependency>
+      <dependency org="org.apache.hadoop" name="hadoop-hdfs" transitive="false" rev="&hadoop.version;" conf="default->*;junit4-stdalone->*">
+        <artifact name="hadoop-hdfs" type="tests" ext="jar" m:classifier="tests" />
+      </dependency>
+      <dependency org="org.mortbay.jetty" name="jetty" rev="6.1.26" transitive="false"/>
+      <dependency org="org.mortbay.jetty" name="jetty-util" rev="6.1.26" transitive="false"/>
+      <dependency org="com.sun.jersey" name="jersey-core" rev="1.16" transitive="false"/>
+      <dependency org="commons-collections" name="commons-collections" rev="3.2.1" transitive="false"/>  
+      
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
 </ivy-module>
diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
index 743240a..1a8ebf4 100644
--- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
+++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
@@ -45,6 +45,7 @@
 import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrInputDocument;
@@ -424,6 +425,9 @@
     indexDoc(doc);
   }
 
+  /**
+   * Indexes the document in both the control client, and a randomly selected client
+   */
   protected void indexDoc(SolrInputDocument doc) throws IOException, SolrServerException {
     controlClient.add(doc);
 
@@ -432,6 +436,17 @@
     client.add(doc);
   }
   
+  /**
+   * Indexes the document in both the control client and the specified client, asserting
+   * that the responses are equivalent.
+   */
+  protected UpdateResponse indexDoc(SolrServer server, SolrParams params, SolrInputDocument... sdocs) throws IOException, SolrServerException {
+    UpdateResponse controlRsp = add(controlClient, params, sdocs);
+    UpdateResponse specificRsp = add(server, params, sdocs);
+    compareSolrResponses(specificRsp, controlRsp);
+    return specificRsp;
+  }
+
   protected UpdateResponse add(SolrServer server, SolrParams params, SolrInputDocument... sdocs) throws IOException, SolrServerException {
     UpdateRequest ureq = new UpdateRequest();
     ureq.setParams(new ModifiableSolrParams(params));
@@ -546,6 +561,9 @@
   }
   
   public QueryResponse queryAndCompare(SolrParams params, SolrServer... servers) throws SolrServerException {
+    return queryAndCompare(params, Arrays.<SolrServer>asList(servers));
+  }
+  public QueryResponse queryAndCompare(SolrParams params, Iterable<SolrServer> servers) throws SolrServerException {
     QueryResponse first = null;
     for (SolrServer server : servers) {
       QueryResponse rsp = server.query(new ModifiableSolrParams(params));
@@ -783,8 +801,14 @@
     return null;
   }
 
+  protected void compareSolrResponses(SolrResponse a, SolrResponse b) {
+    String cmp = compare(a.getResponse(), b.getResponse(), flags, handle);
+    if (cmp != null) {
+      log.error("Mismatched responses:\n" + a + "\n" + b);
+      Assert.fail(cmp);
+    }
+  }
   protected void compareResponses(QueryResponse a, QueryResponse b) {
-    String cmp;
     if (System.getProperty("remove.version.field") != null) {
       // we don't care if one has a version and the other doesnt -
       // control vs distrib
@@ -800,11 +824,7 @@
         }
       }
     }
-    cmp = compare(a.getResponse(), b.getResponse(), flags, handle);
-    if (cmp != null) {
-      log.error("Mismatched responses:\n" + a + "\n" + b);
-      Assert.fail(cmp);
-    }
+    compareSolrResponses(a, b);
   }
 
   @Test
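
Note: a hypothetical subclass showing the new helper in use; it indexes through the control client and one specific server, then asserts via compareSolrResponses that both responses match. The document fields are made up for illustration:

    import org.apache.solr.BaseDistributedSearchTestCase;
    import org.apache.solr.common.SolrInputDocument;

    public class ExampleDistribTest extends BaseDistributedSearchTestCase {
      @Override
      public void doTest() throws Exception {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "1");
        // Indexes into the control client and clients.get(0), comparing the responses.
        indexDoc(clients.get(0), params(), doc);
        commit();
      }
    }
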
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrJettyTestBase.java b/solr/test-framework/src/java/org/apache/solr/SolrJettyTestBase.java
index 3d619c4..feae82a 100755
--- a/solr/test-framework/src/java/org/apache/solr/SolrJettyTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrJettyTestBase.java
@@ -44,18 +44,32 @@
   public String getSolrHome() { return ExternalPaths.EXAMPLE_HOME; }
 
   private static boolean manageSslProps = true;
-  private static final File TEST_KEYSTORE = new File(ExternalPaths.SOURCE_HOME, 
-                                                     "example/etc/solrtest.keystore");
+  private static File TEST_KEYSTORE;
   private static final Map<String,String> SSL_PROPS = new HashMap<String,String>();
   static {
+    TEST_KEYSTORE = (null == ExternalPaths.SOURCE_HOME)
+      ? null : new File(ExternalPaths.SOURCE_HOME, "example/etc/solrtest.keystore");
+    String keystorePath = null == TEST_KEYSTORE ? null : TEST_KEYSTORE.getAbsolutePath();
+
     SSL_PROPS.put("tests.jettySsl","false");
     SSL_PROPS.put("tests.jettySsl.clientAuth","false");
-    SSL_PROPS.put("javax.net.ssl.keyStore", TEST_KEYSTORE.getAbsolutePath());
+    SSL_PROPS.put("javax.net.ssl.keyStore", keystorePath);
     SSL_PROPS.put("javax.net.ssl.keyStorePassword","secret");
-    SSL_PROPS.put("javax.net.ssl.trustStore", TEST_KEYSTORE.getAbsolutePath());
+    SSL_PROPS.put("javax.net.ssl.trustStore", keystorePath);
     SSL_PROPS.put("javax.net.ssl.trustStorePassword","secret");
   }
 
+  /**
+   * Returns the File object for the example keystore used when this base class randomly 
+   * uses SSL.  May be null if this test does not appear to be running as part of the 
+   * standard solr distribution and does not have access to the example configs.
+   *
+   * @lucene.internal 
+   */
+  protected static File getExampleKeystoreFile() {
+    return TEST_KEYSTORE;
+  }
+
   @BeforeClass
   public static void beforeSolrJettyTestBase() throws Exception {
 
@@ -63,20 +77,27 @@
     final boolean trySsl = random().nextBoolean();
     final boolean trySslClientAuth = random().nextBoolean();
     
+    // only randomize SSL if we are a solr test with access to the example keystore
+    if (null == getExampleKeystoreFile()) {
+      log.info("Solr's example keystore not defined (not a solr test?) skipping SSL randomization");
+      manageSslProps = false;
+      return;
+    }
+
+    assertTrue("test keystore does not exist, randomized ssl testing broken: " +
+               getExampleKeystoreFile().getAbsolutePath(), 
+               getExampleKeystoreFile().exists() );
+    
     // only randomize SSL if none of the SSL_PROPS are already set
     final Map<Object,Object> sysprops = System.getProperties();
     for (String prop : SSL_PROPS.keySet()) {
       if (sysprops.containsKey(prop)) {
         log.info("System property explicitly set, so skipping randomized ssl properties: " + prop);
         manageSslProps = false;
-        break;
+        return;
       }
     }
 
-    assertTrue("test keystore does not exist, can't be used for randomized " +
-               "ssl testing: " + TEST_KEYSTORE.getAbsolutePath(), 
-               TEST_KEYSTORE.exists() );
-
     if (manageSslProps) {
       log.info("Randomized ssl ({}) and clientAuth ({})", trySsl, trySslClientAuth);
       for (String prop : SSL_PROPS.keySet()) {
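
A sketch of how an external (non-distribution) test might interact with the keystore accessor added above; when the source home cannot be located, getExampleKeystoreFile() returns null and the base class skips SSL randomization (class name hypothetical):

    import java.io.File;

    import org.apache.solr.SolrJettyTestBase;
    import org.junit.BeforeClass;

    public class ExternalClientSketchTest extends SolrJettyTestBase {
      @BeforeClass
      public static void reportKeystore() {
        File keystore = getExampleKeystoreFile();
        if (null == keystore) {
          // running outside the solr source tree: no SSL randomization
          System.out.println("example keystore unavailable");
        }
      }
    }
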
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 972e601..3eda55f 100755
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -17,26 +17,28 @@
 
 package org.apache.solr;
 
-import java.io.*;
-import java.util.*;
-import java.util.logging.*;
-
-import javax.xml.xpath.XPathExpressionException;
-
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
 import org.apache.commons.io.FileUtils;
-import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.QuickPatchThreadsFilter;
 import org.apache.solr.client.solrj.util.ClientUtils;
-import org.apache.solr.common.*;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.SolrInputField;
 import org.apache.solr.common.cloud.SolrZkClient;
-import org.apache.solr.common.params.*;
+import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.XML;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.JsonUpdateRequestHandler;
-import org.apache.solr.request.*;
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.request.SolrRequestHandler;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -44,7 +46,10 @@
 import org.apache.solr.util.AbstractSolrTestCase;
 import org.apache.solr.util.RevertDefaultThreadHandlerRule;
 import org.apache.solr.util.TestHarness;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
 import org.noggit.CharArr;
@@ -54,9 +59,22 @@
 import org.slf4j.LoggerFactory;
 import org.xml.sax.SAXException;
 
-import com.carrotsearch.randomizedtesting.RandomizedContext;
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
+import javax.xml.xpath.XPathExpressionException;
+import java.io.File;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.ConsoleHandler;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+
+import static com.google.common.base.Preconditions.checkNotNull;
 
 /**
  * A junit4 Solr test harness that extends LuceneTestCaseJ4. To change which core is used when loading the schema and solrconfig.xml, simply
@@ -86,6 +104,9 @@
   @SuppressWarnings("unused")
   private static void beforeClass() {
     System.setProperty("jetty.testMode", "true");
+    
+    System.setProperty("useCompoundFile", Boolean.toString(random().nextBoolean()));
+    System.setProperty("enable.update.log", usually() ? "true" : "false");
     System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong()));
     setupLogging();
     startTrackingSearchers();
@@ -104,6 +125,8 @@
     coreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
     System.clearProperty("jetty.testMode");
     System.clearProperty("tests.shardhandler.randomSeed");
+    System.clearProperty("enable.update.log");
+    System.clearProperty("useCompoundFile");
   }
 
   private static boolean changedFactory = false;
@@ -340,6 +363,9 @@
    * The directory used to story the index managed by the TestHarness h
    */
   protected static File dataDir;
+  
+  // hack: String counterpart to the File dataDir above, for non-local (e.g. HDFS) data dirs
+  protected static String hdfsDataDir;
 
   /**
    * Initializes things your test might need
@@ -390,14 +416,47 @@
   public static void createCore() {
     assertNotNull(testSolrHome);
     solrConfig = TestHarness.createConfig(testSolrHome, coreName, getSolrConfigFile());
-    h = new TestHarness( coreName,
-            dataDir.getAbsolutePath(),
+    h = new TestHarness( coreName, hdfsDataDir == null ? dataDir.getAbsolutePath() : hdfsDataDir,
             solrConfig,
             getSchemaFile());
     lrf = h.getRequestFactory
             ("standard",0,20,CommonParams.VERSION,"2.2");
   }
 
+  public static CoreContainer createCoreContainer(String solrHome, String solrXML) {
+    testSolrHome = checkNotNull(solrHome);
+    if (dataDir == null)
+      createTempDir();
+    h = new TestHarness(solrHome, solrXML);
+    lrf = h.getRequestFactory("standard", 0, 20, CommonParams.VERSION, "2.2");
+    return h.getCoreContainer();
+  }
+
+  public static CoreContainer createDefaultCoreContainer(String solrHome) {
+    testSolrHome = checkNotNull(solrHome);
+    if (dataDir == null)
+      createTempDir();
+    h = new TestHarness("collection1", dataDir.getAbsolutePath(), "solrconfig.xml", "schema.xml");
+    lrf = h.getRequestFactory("standard", 0, 20, CommonParams.VERSION, "2.2");
+    return h.getCoreContainer();
+  }
+
+  public static boolean hasInitException(String message) {
+    for (Map.Entry<String, Exception> entry : h.getCoreContainer().getCoreInitFailures().entrySet()) {
+      if (entry.getValue().getMessage().indexOf(message) != -1)
+        return true;
+    }
+    return false;
+  }
+
+  public static boolean hasInitException(Class<? extends Exception> exceptionType) {
+    for (Map.Entry<String, Exception> entry : h.getCoreContainer().getCoreInitFailures().entrySet()) {
+      if (exceptionType.isAssignableFrom(entry.getValue().getClass()))
+        return true;
+    }
+    return false;
+  }
+
   /** Subclasses that override setUp can optionally call this method
    * to log the fact that their setUp process has ended.
    */
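
A sketch of the new CoreContainer bootstrap helpers together with hasInitException; the solr.xml snippet is illustrative, not taken from the patch, and Exception.class is used so any collected init failure matches:

    import org.apache.solr.SolrTestCaseJ4;
    import org.apache.solr.core.CoreContainer;
    import org.junit.Test;

    public class InitFailureSketchTest extends SolrTestCaseJ4 {
      @Test
      public void testBrokenCoreIsReported() throws Exception {
        String solrXml = "<solr persistent=\"false\">"
            + "<cores adminPath=\"/admin/cores\">"
            + "<core name=\"broken\" instanceDir=\"no-such-dir\"/>"
            + "</cores></solr>";
        CoreContainer cc = createCoreContainer(TEST_HOME(), solrXml);
        try {
          // init failures are collected per core rather than thrown
          assertTrue(hasInitException(Exception.class));
        } finally {
          deleteCore();
        }
      }
    }
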
@@ -1444,12 +1503,12 @@
     return result;
   }
 
-  public void assertXmlFile(final File file, String... xpath)
+  public static void assertXmlFile(final File file, String... xpath)
       throws IOException, SAXException {
 
     try {
       String xml = FileUtils.readFileToString(file, "UTF-8");
-      String results = h.validateXPath(xml, xpath);
+      String results = TestHarness.validateXPath(xml, xpath);
       if (null != results) {
         String msg = "File XPath failure: file=" + file.getPath() + " xpath="
             + results + "\n\nxml was: " + xml;
@@ -1465,8 +1524,9 @@
     File subHome = new File(dstRoot, "conf");
     assertTrue("Failed to make subdirectory ", dstRoot.mkdirs());
     String top = SolrTestCaseJ4.TEST_HOME() + "/collection1/conf";
-    FileUtils.copyFile(new File(top, "schema-tiny.xml"), new File(subHome, "schema-tiny.xml"));
-    FileUtils.copyFile(new File(top, "solrconfig-minimal.xml"), new File(subHome, "solrconfig-minimal.xml"));
+    FileUtils.copyFile(new File(top, "schema-tiny.xml"), new File(subHome, "schema.xml"));
+    FileUtils.copyFile(new File(top, "solrconfig-minimal.xml"), new File(subHome, "solrconfig.xml"));
+    FileUtils.copyFile(new File(top, "solrconfig.snippet.randomindexconfig.xml"), new File(subHome, "solrconfig.snippet.randomindexconfig.xml"));
   }
 
 }
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
index a391951..70f1c84 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractDistribZkTestBase.java
@@ -128,7 +128,7 @@
   
   protected void waitForRecoveriesToFinish(String collection, ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout)
       throws Exception {
-    waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 230);
+    waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 330);
   }
   
   protected void waitForRecoveriesToFinish(String collection,
@@ -151,8 +151,7 @@
           if (verbose) System.out.println("rstate:"
               + shard.getValue().getStr(ZkStateReader.STATE_PROP)
               + " live:"
-              + clusterState.liveNodesContain(shard.getValue().getStr(
-              ZkStateReader.NODE_NAME_PROP)));
+              + clusterState.liveNodesContain(shard.getValue().getNodeName()));
           String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
           if ((state.equals(ZkStateReader.RECOVERING) || state
               .equals(ZkStateReader.SYNC) || state.equals(ZkStateReader.DOWN))
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
index 26a2682..0b6169a 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
@@ -197,6 +197,7 @@
   @AfterClass
   public static void afterClass() {
     System.clearProperty("solrcloud.update.delay");
+    System.clearProperty("genericCoreNodeNames");
   }
   
   public AbstractFullDistribZkTestBase() {
@@ -211,6 +212,10 @@
     useExplicitNodeNames = random().nextBoolean();
   }
   
+  protected String getDataDir(String dataDir) throws IOException {
+    return dataDir;
+  }
+  
   protected void initCloud() throws Exception {
     assert(cloudInit == false);
     cloudInit = true;
@@ -230,7 +235,7 @@
   
   protected CloudSolrServer createCloudClient(String defaultCollection)
       throws MalformedURLException {
-    CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
+    CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
     if (defaultCollection != null) server.setDefaultCollection(defaultCollection);
     server.getLbServer().getHttpClient().getParams()
         .setParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 5000);
@@ -328,8 +333,8 @@
           getClass().getName() + "-jetty" + cnt + "-" + System.currentTimeMillis());
       jettyDir.mkdirs();
       setupJettySolrHome(jettyDir);
-      JettySolrRunner j = createJetty(jettyDir, testDir + "/jetty"
-          + cnt, null, "solrconfig.xml", null);
+      JettySolrRunner j = createJetty(jettyDir, getDataDir(testDir + "/jetty"
+          + cnt), null, "solrconfig.xml", null);
       jettys.add(j);
       SolrServer client = createNewSolrServer(j.getLocalPort());
       clients.add(client);
@@ -428,6 +433,28 @@
     return cnt;
   }
   
+  public JettySolrRunner createJetty(String dataDir, String ulogDir, String shardList,
+      String solrConfigOverride) throws Exception {
+    
+    JettySolrRunner jetty = new JettySolrRunner(getSolrHome(), context, 0,
+        solrConfigOverride, null, false, getExtraServlets());
+    jetty.setShards(shardList);
+    jetty.setDataDir(getDataDir(dataDir));
+    jetty.start();
+    
+    return jetty;
+  }
+  
+  public JettySolrRunner createJetty(File solrHome, String dataDir, String shardList, String solrConfigOverride, String schemaOverride) throws Exception {
+
+    JettySolrRunner jetty = new JettySolrRunner(solrHome.getAbsolutePath(), context, 0, solrConfigOverride, schemaOverride, false, getExtraServlets());
+    jetty.setShards(shardList);
+    jetty.setDataDir(getDataDir(dataDir));
+    jetty.start();
+    
+    return jetty;
+  }
+  
   protected void updateMappingsFromZk(List<JettySolrRunner> jettys,
       List<SolrServer> clients) throws Exception {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
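
A sketch of the intended use of the getDataDir hook: a subclass can rewrite the data dir (for example to an HDFS URI) and every jetty created by the createJetty variants above picks it up; the hdfs:// address is illustrative:

    import java.io.IOException;

    import org.apache.solr.cloud.AbstractFullDistribZkTestBase;

    public class HdfsDataDirSketchTest extends AbstractFullDistribZkTestBase {
      @Override
      protected String getDataDir(String dataDir) throws IOException {
        // redirect index data away from the local filesystem; the base
        // class passes the returned path to JettySolrRunner.setDataDir(...)
        return "hdfs://localhost:8020/solr" + dataDir;
      }

      @Override
      public void doTest() throws Exception {
        // regular distributed test logic would go here
      }
    }
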
@@ -483,7 +510,7 @@
             cjr.jetty = jetty;
             cjr.info = replica;
             cjr.nodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
-            cjr.coreNodeName = replica.getName();
+            cjr.coreNodeName = replica.getNodeName();
             cjr.url = replica.getStr(ZkStateReader.BASE_URL_PROP) + "/" + replica.getStr(ZkStateReader.CORE_NAME_PROP);
             cjr.client = findClientByPort(port, theClients);
             list.add(cjr);
@@ -803,13 +830,68 @@
     }
   }
   
+  /**
+   * Executes a query against each live and active replica of the specified shard 
+   * and asserts that the results are identical.
+   *
+   * @see #queryAndCompare
+   */
+  public QueryResponse queryAndCompareReplicas(SolrParams params, String shard) 
+    throws Exception {
+
+    ArrayList<SolrServer> shardClients = new ArrayList<SolrServer>(7);
+
+    updateMappingsFromZk(jettys, clients);
+    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+    List<CloudJettyRunner> solrJetties = shardToJetty.get(shard);
+    assertNotNull("no jetties found for shard: " + shard, solrJetties);
+
+
+    for (CloudJettyRunner cjetty : solrJetties) {
+      ZkNodeProps props = cjetty.info;
+      String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
+      boolean active = props.getStr(ZkStateReader.STATE_PROP).equals(ZkStateReader.ACTIVE);
+      boolean live = zkStateReader.getClusterState().liveNodesContain(nodeName);
+      if (active && live) {
+        shardClients.add(cjetty.client.solrClient);
+      }
+    }
+    return queryAndCompare(params, shardClients);
+  }
+
+  /**
+   * For each Shard, executes a query against each live and active replica of that shard
+   * and asserts that the results are identical for each replica of the same shard.  
+   * Because results are not compared between replicas of different shards, this method 
+   * should be safe for comparing the results of any query, even one that contains 
+   * "distrib=false", since the replicas of a given shard should all be identical.
+   *
+   * @see AbstractFullDistribZkTestBase#queryAndCompareReplicas(SolrParams, String)
+   */
+  public void queryAndCompareShards(SolrParams params) throws Exception {
+
+    updateMappingsFromZk(jettys, clients);
+    List<String> shards = new ArrayList<String>(shardToJetty.keySet());
+    for (String shard : shards) {
+      queryAndCompareReplicas(params, shard);
+    }
+  }
+
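+
A sketch of the consistency helpers in use; because comparisons are scoped to a single shard, even distrib=false queries are safe here (field values illustrative):

    import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class ReplicaConsistencySketchTest extends AbstractFullDistribZkTestBase {
      @Override
      public void doTest() throws Exception {
        index("id", "1");   // index something via the base class helpers
        commit();

        ModifiableSolrParams q = new ModifiableSolrParams();
        q.set("q", "*:*");
        q.set("distrib", "false");
        // compares results across the replicas of every shard
        queryAndCompareShards(q);
      }
    }
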
+  /** 
+   * Checks that replicas within the same shard have a consistent number of 
+   * documents, delegating to {@link #checkShardConsistency(String, boolean, boolean)}. 
+   */
   protected void checkShardConsistency(String shard) throws Exception {
     checkShardConsistency(shard, false, false);
   }
 
-  /* Returns a non-null string if replicas within the same shard are not consistent.
-   * If expectFailure==false, the exact differences found will be logged since this would be an unexpected failure.
-   * verbose causes extra debugging into to be displayed, even if everything is consistent.
+  /** 
+   * Returns a non-null string if replicas within the same shard do not have a 
+   * consistent number of documents.
+   * If expectFailure==false, the exact differences found will be logged since 
+   * this would be an unexpected failure.
+   * verbose causes extra debugging info to be displayed, even if everything is 
+   * consistent.
    */
   protected String checkShardConsistency(String shard, boolean expectFailure, boolean verbose)
       throws Exception {
@@ -889,6 +971,46 @@
     
   }
   
+  public void showCounts() {
+    Set<String> theShards = shardToJetty.keySet();
+    
+    for (String shard : theShards) {
+      List<CloudJettyRunner> solrJetties = shardToJetty.get(shard);
+      
+      for (CloudJettyRunner cjetty : solrJetties) {
+        ZkNodeProps props = cjetty.info;
+        System.err.println("PROPS:" + props);
+        
+        try {
+          SolrParams query = params("q", "*:*", "rows", "0", "distrib",
+              "false", "tests", "checkShardConsistency"); // "tests" is just a
+                                                          // tag that won't do
+                                                          // anything except be
+                                                          // echoed in logs
+          long num = cjetty.client.solrClient.query(query).getResults()
+              .getNumFound();
+          System.err.println("DOCS:" + num);
+        } catch (SolrServerException e) {
+          System.err.println("error contacting client: " + e.getMessage()
+              + "\n");
+          continue;
+        } catch (SolrException e) {
+          System.err.println("error contacting client: " + e.getMessage()
+              + "\n");
+          continue;
+        }
+        boolean live = false;
+        String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
+        ZkStateReader zkStateReader = cloudClient.getZkStateReader();
+        if (zkStateReader.getClusterState().liveNodesContain(nodeName)) {
+          live = true;
+        }
+        System.err.println(" live:" + live);
+        
+      }
+    }
+  }
+  
   private String toStr(SolrDocumentList lst, int maxSz) {
     if (lst.size() <= maxSz) return lst.toString();
 
@@ -1464,7 +1586,7 @@
       for (String sliceName : slices.keySet()) {
         for (Replica replica : slices.get(sliceName).getReplicas()) {
           if (nodesAllowedToRunShards != null && !nodesAllowedToRunShards.contains(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
-            return "Shard " + replica.getName() + " created on node " + replica.getStr(ZkStateReader.NODE_NAME_PROP) + " not allowed to run shards for the created collection " + collectionName;
+            return "Shard " + replica.getName() + " created on node " + replica.getNodeName() + " not allowed to run shards for the created collection " + collectionName;
           }
         }
         totalShards += slices.get(sliceName).getReplicas().size();
@@ -1505,7 +1627,7 @@
     if (commondCloudSolrServer == null) {
       synchronized(this) {
         try {
-          commondCloudSolrServer = new CloudSolrServer(zkServer.getZkAddress());
+          commondCloudSolrServer = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
           commondCloudSolrServer.setDefaultCollection(DEFAULT_COLLECTION);
           commondCloudSolrServer.connect();
         } catch (MalformedURLException e) {
diff --git a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
index 0495c91..6e47809 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java
@@ -33,10 +33,10 @@
 import java.io.UnsupportedEncodingException;
 
 abstract public class BaseTestHarness {
-  private final ThreadLocal<DocumentBuilder> builderTL = new ThreadLocal<DocumentBuilder>();
-  private final ThreadLocal<XPath> xpathTL = new ThreadLocal<XPath>();
+  private static final ThreadLocal<DocumentBuilder> builderTL = new ThreadLocal<DocumentBuilder>();
+  private static final ThreadLocal<XPath> xpathTL = new ThreadLocal<XPath>();
 
-  public DocumentBuilder getXmlDocumentBuilder() {
+  public static DocumentBuilder getXmlDocumentBuilder() {
     try {
       DocumentBuilder builder = builderTL.get();
       if (builder == null) {
@@ -49,7 +49,7 @@
     }
   }
 
-  public XPath getXpath() {
+  public static XPath getXpath() {
     try {
       XPath xpath = xpathTL.get();
       if (xpath == null) {
@@ -71,7 +71,7 @@
    * @param tests Array of XPath strings to test (in boolean mode) on the xml
    * @return null if all good, otherwise the first test that fails.
    */
-  public String validateXPath(String xml, String... tests)
+  public static String validateXPath(String xml, String... tests)
       throws XPathExpressionException, SAXException {
 
     if (tests==null || tests.length == 0) return null;
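
A sketch of validateXPath as a static utility, now callable without any harness instance (the XML and xpath strings are illustrative):

    import org.apache.solr.util.BaseTestHarness;

    public class XPathSketch {
      public static void main(String[] args) throws Exception {
        String xml = "<response><int name=\"status\">0</int></response>";
        // returns null when every xpath matches, else the first failing test
        String failed = BaseTestHarness.validateXPath(xml, "//int[@name='status'][.='0']");
        System.out.println(null == failed ? "ok" : "failed: " + failed);
      }
    }
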
diff --git a/solr/test-framework/src/java/org/apache/solr/util/ExternalPaths.java b/solr/test-framework/src/java/org/apache/solr/util/ExternalPaths.java
index 79258ea..f12e3a4 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/ExternalPaths.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/ExternalPaths.java
@@ -25,30 +25,55 @@
  * @lucene.internal
  */
 public class ExternalPaths {
+
+  /**
+   * <p>
+   * The main directory path for the solr source being built, if it can be determined.  If it 
+   * cannot be determined -- possibly because the current context is a client code base 
+   * using the test framework -- then this variable will be null.
+   * </p>
+   * <p>
+   * Note that all other static paths available in this class are derived from the source 
+   * home, and if it is null, those paths will just be relative to 'null' and may not be 
+   * meaningful.</p>
+   */
   public static final String SOURCE_HOME = determineSourceHome();
+  /** @see #SOURCE_HOME */
   public static String WEBAPP_HOME = new File(SOURCE_HOME, "webapp/web").getAbsolutePath();
+  /** @see #SOURCE_HOME */
   public static String EXAMPLE_HOME = new File(SOURCE_HOME, "example/solr").getAbsolutePath();
+  /** @see #SOURCE_HOME */
   public static String EXAMPLE_MULTICORE_HOME = new File(SOURCE_HOME, "example/multicore").getAbsolutePath();
+  /** @see #SOURCE_HOME */
   public static String EXAMPLE_SCHEMA=EXAMPLE_HOME+"/collection1/conf/schema.xml";
+  /** @see #SOURCE_HOME */
   public static String EXAMPLE_CONFIG=EXAMPLE_HOME+"/collection1/conf/solrconfig.xml";
   
+  /**
+   * Ugly, ugly hack to determine the example home without depending on the CWD;
+   * this is needed for example/multicore tests which reside outside the classpath.
+   * If the source home can't be determined, this method returns null.
+   */
   static String determineSourceHome() {
-    // ugly, ugly hack to determine the example home without depending on the CWD
-    // this is needed for example/multicore tests which reside outside the classpath
-    File file;
     try {
-      file = new File("solr/conf");
-      if (!file.exists()) {
-        file = new File(Thread.currentThread().getContextClassLoader().getResource("solr/conf").toURI());
+      File file;
+      try {
+        file = new File("solr/conf");
+        if (!file.exists()) {
+          file = new File(Thread.currentThread().getContextClassLoader().getResource("solr/conf").toURI());
+        }
+      } catch (Exception e) {
+        // If there is no "solr/conf" in the classpath, fall back to searching from the current directory.
+        file = new File(".");
       }
-    } catch (Exception e) {
-      // If there is no "solr/conf" in the classpath, fall back to searching from the current directory.
-      file = new File(".");
+      File base = file.getAbsoluteFile();
+      while (null != base && !(new File(base, "solr/CHANGES.txt").exists())) {
+        base = base.getParentFile();
+      }
+      return (null == base) ? null : new File(base, "solr/").getAbsolutePath();
+    } catch (RuntimeException e) {
+      // all bets are off
+      return null;
     }
-    File base = file.getAbsoluteFile();
-    while (!new File(base, "solr/CHANGES.txt").exists()) {
-      base = base.getParentFile();
-    }
-    return new File(base, "solr/").getAbsolutePath();
   }
 }
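
A sketch of client code guarding on the now-nullable SOURCE_HOME before touching any of the derived paths:

    import java.io.File;

    import org.apache.solr.util.ExternalPaths;

    public class SourceHomeSketch {
      public static void main(String[] args) {
        if (null == ExternalPaths.SOURCE_HOME) {
          // external code base using the test framework: derived paths
          // such as EXAMPLE_HOME are not meaningful in this case
          System.out.println("not running inside the solr source tree");
        } else {
          System.out.println("example home exists: "
              + new File(ExternalPaths.EXAMPLE_HOME).exists());
        }
      }
    }
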
diff --git a/solr/test-framework/src/java/org/apache/solr/util/RestTestBase.java b/solr/test-framework/src/java/org/apache/solr/util/RestTestBase.java
index 5a77b9f..9041f0b 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/RestTestBase.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/RestTestBase.java
@@ -127,7 +127,7 @@
       }
       */
 
-      String results = restTestHarness.validateXPath(response, tests);
+      String results = TestHarness.validateXPath(response, tests);
 
       if (null != results) {
         String msg = "REQUEST FAILED: xpath=" + results
diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
index 2df2f10..50cc576 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
@@ -17,20 +17,17 @@
 
 package org.apache.solr.util;
 
+import com.google.common.base.Charsets;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.core.Config;
-import org.apache.solr.core.ConfigSolrXmlOld;
+import org.apache.solr.common.util.NamedList.NamedListEntry;
+import org.apache.solr.core.ConfigSolr;
+import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.SolrCore;
-import org.apache.solr.core.CoreContainer;
-import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrResourceLoader;
 import org.apache.solr.handler.UpdateRequestHandler;
-import org.apache.solr.logging.ListenerConfig;
-import org.apache.solr.logging.LogWatcher;
-import org.apache.solr.logging.jul.JulWatcher;
 import org.apache.solr.request.LocalSolrQueryRequest;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.request.SolrRequestHandler;
@@ -40,9 +37,6 @@
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.IndexSchemaFactory;
 import org.apache.solr.servlet.DirectSolrConnection;
-import org.apache.solr.common.util.NamedList.NamedListEntry;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
 
 import java.io.ByteArrayInputStream;
 import java.io.File;
@@ -51,8 +45,6 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import javax.xml.parsers.ParserConfigurationException;
-
 
 /**
  * This class provides a simple harness that may be useful when
@@ -106,19 +98,8 @@
                          SolrConfig solrConfig,
                          String schemaFile) {
     this( coreName, dataDirectory, solrConfig, IndexSchemaFactory.buildIndexSchema(schemaFile, solrConfig));
-  } 
-  /**
-   * @param coreName to initialize
-   * @param dataDirectory path for index data, will not be cleaned up
-   * @param solrConfig solrconfig instance
-   * @param indexSchema schema instance
-   */
-    public TestHarness( String coreName,
-                        String dataDirectory,
-                        SolrConfig solrConfig,
-                        IndexSchema indexSchema) {
-        this(coreName, new Initializer(coreName, dataDirectory, solrConfig, indexSchema));
-    }
+  }
+
    /**
     * @param dataDirectory path for index data, will not be cleaned up
     * @param solrConfig solronfig instance
@@ -137,18 +118,26 @@
   public TestHarness( String dataDirectory,
                       SolrConfig solrConfig,
                       IndexSchema indexSchema) {
-      this(null, new Initializer(null, dataDirectory, solrConfig, indexSchema));
+      this(CoreContainer.DEFAULT_DEFAULT_CORE_NAME, dataDirectory, solrConfig, indexSchema);
   }
-  
-  public TestHarness(String coreName, CoreContainer.Initializer init) {
-    try {
 
-      container = init.initialize();
+  /**
+   * @param coreName to initialize
+   * @param dataDir path for index data, will not be cleaned up
+   * @param solrConfig solrconfig resource name
+   * @param indexSchema schema resource name
+   */
+  public TestHarness(String coreName, String dataDir, String solrConfig, String indexSchema) {
+    try {
       if (coreName == null)
         coreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
-
       this.coreName = coreName;
 
+      SolrResourceLoader loader = new SolrResourceLoader(SolrResourceLoader.locateSolrHome());
+      ConfigSolr config = getTestHarnessConfig(coreName, dataDir, solrConfig, indexSchema);
+      container = new CoreContainer(loader, config);
+      container.load();
+
       updater = new UpdateRequestHandler();
       updater.init( null );
     } catch (Exception e) {
@@ -156,67 +145,50 @@
     }
   }
 
-  // Creates a container based on infos needed to create one core
-  static class Initializer extends CoreContainer.Initializer {
-    String coreName;
-    String dataDirectory;
-    SolrConfig solrConfig;
-    IndexSchema indexSchema;
-    public Initializer(String coreName,
-                      String dataDirectory,
-                      SolrConfig solrConfig,
-                      IndexSchema indexSchema) {
-      if (coreName == null)
-        coreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
-      this.coreName = coreName;
-      this.dataDirectory = dataDirectory;
-      this.solrConfig = solrConfig;
-      this.indexSchema = indexSchema;
-    }
-    public String getCoreName() {
-      return coreName;
-    }
-    @Override
-    public CoreContainer initialize() {
-      CoreContainer container;
-      try {
-        String solrHome = SolrResourceLoader.locateSolrHome();
-        container = new CoreContainer(new SolrResourceLoader(solrHome)) {
-          {
-            String hostPort = System.getProperty("hostPort", "8983");
-            String hostContext = System.getProperty("hostContext", "solr");
-            defaultCoreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
-            initShardHandler();
-            zkSys.initZooKeeper(this, solrHome, System.getProperty("zkHost"), 30000, hostPort, hostContext, null, "30000", 30000, 30000);
-            ByteArrayInputStream is = new ByteArrayInputStream(ConfigSolrXmlOld.DEF_SOLR_XML.getBytes("UTF-8"));
-            Config config = new Config(loader, null, new InputSource(is), null, false);
-            cfg = new ConfigSolrXmlOld(config, this);
-          }
-        };
-      } catch (ParserConfigurationException e) {
-        throw new RuntimeException(e);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      } catch (SAXException e) {
-        throw new RuntimeException(e);
-      }
-      LogWatcher<?> logging = new JulWatcher("test");
-      logging.registerListener(new ListenerConfig());
-      container.setLogging(logging);
-      
-      CoreDescriptor dcore = new CoreDescriptor(container, coreName, solrConfig.getResourceLoader().getInstanceDir());
-      dcore.setConfigName(solrConfig.getResourceName());
-      dcore.setSchemaName(indexSchema.getResourceName());
-      SolrCore core = new SolrCore(coreName, dataDirectory, solrConfig, indexSchema, dcore);
-      container.register(coreName, core, false);
+  public TestHarness(String coreName, String dataDir, SolrConfig solrConfig, IndexSchema indexSchema) {
+    this(coreName, dataDir, solrConfig.getResourceName(), indexSchema.getResourceName());
+  }
 
-      // TODO: we should be exercising the *same* core container initialization code, not equivalent code!
-      if (container.getZkController() == null && core.getUpdateHandler().getUpdateLog() != null) {
-        // always kick off recovery if we are in standalone mode.
-        core.getUpdateHandler().getUpdateLog().recoverFromLog();
-      }
-      return container;
-    }
+  /**
+   * Create a TestHarness using a specific solr home directory and solr xml
+   * @param solrHome the solr home directory
+   * @param solrXml a File pointing to a solr.xml configuration
+   */
+  public TestHarness(String solrHome, String solrXml) {
+    this(new SolrResourceLoader(solrHome),
+          ConfigSolr.fromInputStream(null, new ByteArrayInputStream(solrXml.getBytes(Charsets.UTF_8))));
+  }
+
+  /**
+   * Create a TestHarness using a specific resource loader and config
+   * @param loader the SolrResourceLoader to use
+   * @param config the ConfigSolr to use
+   */
+  public TestHarness(SolrResourceLoader loader, ConfigSolr config) {
+    container = new CoreContainer(loader, config);
+    container.load();
+    updater = new UpdateRequestHandler();
+    updater.init(null);
+  }
+
+  private static ConfigSolr getTestHarnessConfig(String coreName, String dataDir,
+                                                 String solrConfig, String schema) {
+    String solrxml = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
+        + "<solr persistent=\"false\">\n"
+        + "  <logging enabled=\"true\"/>\n"
+        + "  <cores adminPath=\"/admin/cores\" defaultCoreName=\""
+        + CoreContainer.DEFAULT_DEFAULT_CORE_NAME
+        + "\""
+        + " host=\"${host:}\" hostPort=\"${hostPort:}\" hostContext=\"${hostContext:}\""
+        + " distribUpdateSoTimeout=\"30000\""
+        + " zkClientTimeout=\"${zkClientTimeout:30000}\" distribUpdateConnTimeout=\"30000\""
+        + ">\n"
+        + "    <core name=\"" + coreName + "\" config=\"" + solrConfig
+        + "\" schema=\"" + schema + "\" dataDir=\"" + dataDir
+        + "\" transient=\"false\" loadOnStartup=\"true\""
+        + " shard=\"${shard:shard1}\" collection=\"${collection:collection1}\" instanceDir=\"" + coreName + "/\" />\n"
+        + "  </cores>\n" + "</solr>";
+    return ConfigSolr.fromString(new SolrResourceLoader(dataDir), solrxml);
   }
   
   public CoreContainer getCoreContainer() {
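
A sketch of the new string-based TestHarness constructor; the solr home path and solr.xml content below are illustrative only:

    import org.apache.solr.core.CoreContainer;
    import org.apache.solr.util.TestHarness;

    public class HarnessBootstrapSketch {
      public static void main(String[] args) {
        String solrXml = "<solr persistent=\"false\">"
            + "<cores adminPath=\"/admin/cores\" defaultCoreName=\"collection1\">"
            + "<core name=\"collection1\" instanceDir=\"collection1\"/>"
            + "</cores></solr>";
        // parses the solr.xml from memory and loads the CoreContainer
        TestHarness h = new TestHarness("/path/to/solr/home", solrXml);
        CoreContainer cc = h.getCoreContainer();
        System.out.println("loaded cores: " + cc.getCoreNames());
      }
    }
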
diff --git a/solr/webapp/build.xml b/solr/webapp/build.xml
index 20ee072..4ad4b05 100644
--- a/solr/webapp/build.xml
+++ b/solr/webapp/build.xml
@@ -18,6 +18,9 @@
 <project name="solr-webapp" default="default">
   <description>Solr webapp</description>
 
+  <property name="rat.additional-includes" value="**"/>
+  <property name="rat.additional-excludes" value="web/img/**"/>
+
   <import file="../common-build.xml"/>
 
   <property name="exclude.from.war" value="*slf4j*,log4j-*" />
@@ -65,8 +68,9 @@
     </war>
   </target>
 
-  <target name="dist-maven" depends="dist, filter-pom-templates, install-maven-tasks, m2-deploy-solr-parent-pom">
-    <m2-deploy jar.file="${dist}/solr-${version}.war"
-               pom.xml="${filtered.pom.templates.dir}/solr/webapp/pom.xml"/>
-  </target>
+  <!-- nothing to do -->
+  <target name="dist-maven"/>
+
+  <!-- nothing to do -->
+  <target name="-validate-maven-dependencies"/>
 </project>
diff --git a/solr/webapp/web/WEB-INF/weblogic.xml b/solr/webapp/web/WEB-INF/weblogic.xml
index 4b3a667..b8645eb 100755
--- a/solr/webapp/web/WEB-INF/weblogic.xml
+++ b/solr/webapp/web/WEB-INF/weblogic.xml
@@ -1,4 +1,20 @@
 <?xml version='1.0' encoding='UTF-8'?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
 <weblogic-web-app
     xmlns="http://www.bea.com/ns/weblogic/90"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
diff --git a/solr/webapp/web/admin.html b/solr/webapp/web/admin.html
index 9f5a131..2db54e9 100644
--- a/solr/webapp/web/admin.html
+++ b/solr/webapp/web/admin.html
@@ -35,6 +35,7 @@
   <link rel="stylesheet" type="text/css" href="css/styles/logging.css?_=${version}">
   <link rel="stylesheet" type="text/css" href="css/styles/menu.css?_=${version}">
   <link rel="stylesheet" type="text/css" href="css/styles/plugins.css?_=${version}">
+  <link rel="stylesheet" type="text/css" href="css/styles/documents.css?_=${version}">
   <link rel="stylesheet" type="text/css" href="css/styles/query.css?_=${version}">
   <link rel="stylesheet" type="text/css" href="css/styles/replication.css?_=${version}">
   <link rel="stylesheet" type="text/css" href="css/styles/schema-browser.css?_=${version}">
@@ -138,6 +139,12 @@
     </div>
     
   </div>
+
+  <div id="connection_status">
+
+    <span>Connection lost …</span>
+
+  </div>
   
   <script type="text/javascript"> var require = { urlArgs: '_=${version}' }; </script>
   <script src="js/require.js?_=${version}" data-main="js/main"></script>
diff --git a/solr/webapp/web/css/chosen.css b/solr/webapp/web/css/chosen.css
index 706caf7..83f6b97 100755
--- a/solr/webapp/web/css/chosen.css
+++ b/solr/webapp/web/css/chosen.css
@@ -1,3 +1,32 @@
+/*
+
+Chosen
+
+- by Patrick Filler for Harvest http://getharvest.com
+- Copyright (c) 2011-2013 by Harvest
+
+Available for use under the MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /* @group Base */
 .chzn-container {
   font-size: 13px;
diff --git a/solr/webapp/web/css/styles/analysis.css b/solr/webapp/web/css/styles/analysis.css
index f28737b..2273f4f 100644
--- a/solr/webapp/web/css/styles/analysis.css
+++ b/solr/webapp/web/css/styles/analysis.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #analysis-holder
 {
   background-image: url( ../../img/div.gif );
@@ -289,4 +308,4 @@
 }
 
 #content #analysis #field-analysis h2 { background-image: url( ../../img/ico/receipt.png ); }
-#content #analysis .analysis-result h2 { background-image: url( ../../img/ico/receipt-invoice.png ); }
\ No newline at end of file
+#content #analysis .analysis-result h2 { background-image: url( ../../img/ico/receipt-invoice.png ); }
diff --git a/solr/webapp/web/css/styles/cloud.css b/solr/webapp/web/css/styles/cloud.css
index 3c5967c..9121432 100644
--- a/solr/webapp/web/css/styles/cloud.css
+++ b/solr/webapp/web/css/styles/cloud.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #cloud
 {
   position: relative;
@@ -387,4 +406,4 @@
 #content #graph-content .link.lvl-1
 {
   stroke: #fff;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/common.css b/solr/webapp/web/css/styles/common.css
index 960834e..8e671ca 100644
--- a/solr/webapp/web/css/styles/common.css
+++ b/solr/webapp/web/css/styles/common.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 *
 {
   background-repeat: no-repeat;
@@ -605,4 +624,66 @@
 #content .tree a.jstree-search
 {
   color:aqua;
-}
\ No newline at end of file
+}
+
+#connection_status
+{
+  display: none;
+  padding: 30px;
+}
+
+#connection_status span
+{
+  background-image: url( ../../img/ico/network-status-busy.png );
+  background-position: 0 50%;
+  color: #800;
+  padding-left: 26px;
+}
+
+#connection_status.online span,
+#connection_status.online span a
+{
+  color: #080;
+}
+
+#connection_status.online span
+{
+  background-image: url( ../../img/ico/network-status.png );
+}
+
+#connection_status.online span a
+{
+  text-decoration: underline;
+}
+
+#connection_status.online span a:hover
+{
+  text-decoration: none;
+}
+
+#content .address-bar
+{
+  margin-bottom: 10px;
+  background-image: url( ../../img/ico/ui-address-bar.png );
+  background-position: 5px 50%;
+  border: 1px solid #f0f0f0;
+  box-shadow: 1px 1px 0 #f0f0f0;
+  -moz-box-shadow: 1px 1px 0 #f0f0f0;
+  -webkit-box-shadow: 1px 1px 0 #f0f0f0;
+  color: #c0c0c0;
+  display: block;
+  overflow: hidden;
+  padding: 5px;
+  padding-left: 26px;
+  white-space: nowrap;
+}
+
+#content .address-bar:focus,
+#content .address-bar:hover
+{
+  border-color: #c0c0c0;
+  box-shadow: 1px 1px 0 #d8d8d8;
+  -moz-box-shadow: 1px 1px 0 #d8d8d8;
+  -webkit-box-shadow: 1px 1px 0 #d8d8d8;
+  color: #333;
+}
diff --git a/solr/webapp/web/css/styles/cores.css b/solr/webapp/web/css/styles/cores.css
index 2481105..a93ad55 100644
--- a/solr/webapp/web/css/styles/cores.css
+++ b/solr/webapp/web/css/styles/cores.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #cores
 {
   position: relative;
@@ -200,4 +219,4 @@
 #content #cores #data li dd.ico span
 {
   display: none;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/dashboard.css b/solr/webapp/web/css/styles/dashboard.css
index 80dac86..f116624 100644
--- a/solr/webapp/web/css/styles/dashboard.css
+++ b/solr/webapp/web/css/styles/dashboard.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #dashboard .block
 {
   background-image: none;
diff --git a/solr/webapp/web/css/styles/dataimport.css b/solr/webapp/web/css/styles/dataimport.css
index 21732b6..0ec196d 100644
--- a/solr/webapp/web/css/styles/dataimport.css
+++ b/solr/webapp/web/css/styles/dataimport.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #dataimport
 {
   background-image: url( ../../img/div.gif );
@@ -381,4 +400,4 @@
 {
   color: #c0c0c0;
   font-style: normal;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/documents.css b/solr/webapp/web/css/styles/documents.css
new file mode 100644
index 0000000..18c4efc
--- /dev/null
+++ b/solr/webapp/web/css/styles/documents.css
@@ -0,0 +1,197 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
+#content #documents
+{
+  background-image: url( ../../img/div.gif );
+  background-position: 45% 0;
+  background-repeat: repeat-y;
+}
+
+#content #documents #form
+{
+  float: left;
+  /*width: 21%;*/
+}
+
+#content #documents #form label
+{
+  cursor: pointer;
+  display: block;
+  margin-top: 5px;
+}
+
+#content #documents #form input,
+#content #documents #form select,
+#content #documents #form textarea
+{
+  margin-bottom: 2px;
+  /*width: 100%;*/
+}
+
+#content #documents #form input,
+#content #documents #form textarea
+{
+  margin-bottom: 2px;
+  /*width: 98%;*/
+}
+
+#content #documents #form #start
+{
+  float: left;
+  /*width: 45%;*/
+}
+
+#content #documents #form #rows
+{
+  float: right;
+ /* width: 45%;*/
+}
+
+#content #documents #form .checkbox input
+{
+  margin-bottom: 0;
+  width: auto;
+}
+
+#content #documents #form fieldset,
+#content #documents #form .optional.expanded
+{
+  border: 1px solid #fff;
+  border-top: 1px solid #c0c0c0;
+  margin-bottom: 5px;
+}
+
+#content #documents #form fieldset.common
+{
+  margin-top: 10px;
+}
+
+#content #documents #form fieldset legend,
+#content #documents #form .optional.expanded legend
+{
+  display: block;
+  margin-left: 10px;
+  padding: 0px 5px;
+}
+
+#content #documents #form fieldset legend label
+{
+  margin-top: 0;
+}
+
+#content #documents #form fieldset .fieldset
+{
+  border-bottom: 1px solid #f0f0f0;
+  margin-bottom: 5px;
+  padding-bottom: 10px;
+}
+
+#content #documents #form .optional
+{
+  border: 0;
+}
+
+#content #documents #form .optional .fieldset
+{
+  display: none;
+}
+
+#content #documents #form .optional legend
+{
+  margin-left: 0;
+  padding-left: 0;
+}
+
+#content #documents #form .optional.expanded .fieldset
+{
+  display: block;
+}
+
+#content #documents #file-upload{
+  display: none;
+}
+
+#content #documents #result
+{
+  display: none;
+  float: right;
+  width: 54%;
+}
+
+#content #documents #result #url
+{
+  margin-bottom: 10px;
+  background-image: url( ../../img/ico/ui-address-bar.png );
+  background-position: 5px 50%;
+  border: 1px solid #f0f0f0;
+  box-shadow: 1px 1px 0 #f0f0f0;
+  -moz-box-shadow: 1px 1px 0 #f0f0f0;
+  -webkit-box-shadow: 1px 1px 0 #f0f0f0;
+  color: #c0c0c0;
+  display: block;
+  overflow: hidden;
+  padding: 5px;
+  padding-left: 26px;
+  white-space: nowrap;
+}
+
+#content #documents #result #url:focus,
+#content #documents #result #url:hover
+{
+  border-color: #c0c0c0;
+  box-shadow: 1px 1px 0 #d8d8d8;
+  -moz-box-shadow: 1px 1px 0 #d8d8d8;
+  -webkit-box-shadow: 1px 1px 0 #d8d8d8;
+  color: #333;
+}
+
+#content #documents #result #response
+{
+}
+
+#content #documents #result #response pre
+{
+  padding-left: 20px;
+}
+
+.description{
+  font-weight: bold;
+}
+
+#upload-only{
+  display: none;
+}
+
+#document-type{
+  padding-bottom: 5px;
+}
+
+#wizard{
+  display: none;
+}
+
+#wizard-fields div{
+  padding-top: 5px;
+  padding-bottom: 5px;
+}
+
+#wiz-field-data, #wiz-field-data span{
+  vertical-align: top;
+}
diff --git a/solr/webapp/web/css/styles/index.css b/solr/webapp/web/css/styles/index.css
index 28d954f..4977cf5 100644
--- a/solr/webapp/web/css/styles/index.css
+++ b/solr/webapp/web/css/styles/index.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #index .bar-desc
 {
   color: #c0c0c0;
@@ -187,4 +206,4 @@
 #content #index #jvm-memory-bar
 {
   margin-top: 20px;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/java-properties.css b/solr/webapp/web/css/styles/java-properties.css
index 8a46223..ab5b67b 100644
--- a/solr/webapp/web/css/styles/java-properties.css
+++ b/solr/webapp/web/css/styles/java-properties.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #java-properties .loader
 {
   background-position: 0 50%;
@@ -30,4 +49,4 @@
 #content #java-properties li dd.odd
 {
   color: #999;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/logging.css b/solr/webapp/web/css/styles/logging.css
index b37ff6f..86a835f 100644
--- a/solr/webapp/web/css/styles/logging.css
+++ b/solr/webapp/web/css/styles/logging.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #logging .loader
 {
   background-position: 0 50%;
@@ -333,4 +352,4 @@
 {
   background-image: url( ../../img/ico/cross-1.png );
   color: #800;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/menu.css b/solr/webapp/web/css/styles/menu.css
index dfa54b2..23e39fc 100644
--- a/solr/webapp/web/css/styles/menu.css
+++ b/solr/webapp/web/css/styles/menu.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #menu-wrapper
 {
   position: fixed;
@@ -239,6 +258,7 @@
 #core-menu .schema a { background-image: url( ../../img/ico/table.png ); }
 #core-menu .config a { background-image: url( ../../img/ico/gear.png ); }
 #core-menu .analysis a { background-image: url( ../../img/ico/funnel.png ); }
+#core-menu .documents a { background-image: url( ../../img/ico/documents-stack.png ); }
 #core-menu .schema-browser a { background-image: url( ../../img/ico/book-open-text.png ); }
 #core-menu .replication a { background-image: url( ../../img/ico/node.png ); }
 #core-menu .distribution a { background-image: url( ../../img/ico/node-select.png ); }
@@ -284,4 +304,4 @@
 {
   background-color: #e0e0e0;
   font-weight: bold;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/plugins.css b/solr/webapp/web/css/styles/plugins.css
index bdcd8c4..85e5ab1 100644
--- a/solr/webapp/web/css/styles/plugins.css
+++ b/solr/webapp/web/css/styles/plugins.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #plugins #navigation
 {
   width: 20%;
@@ -172,4 +191,4 @@
 #recording button span
 {
   background-image: url( ../../img/ico/new-text.png );
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/query.css b/solr/webapp/web/css/styles/query.css
index 0547e2c..9fd7913 100644
--- a/solr/webapp/web/css/styles/query.css
+++ b/solr/webapp/web/css/styles/query.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #query
 {
   background-image: url( ../../img/div.gif );
@@ -32,6 +51,44 @@
   width: 98%;
 }
 
+#content #query #form .multiple input
+{
+  float: left;
+  width: 80%;
+}
+
+#content #query #form .multiple .buttons
+{
+  float: right;
+  width: 16%;
+}
+
+
+#content #query #form .multiple a
+{
+  background-position: 50% 50%;
+  display: block;
+  height: 25px;
+  width: 49%;
+}
+
+#content #query #form .multiple a span
+{
+  display: none;
+}
+
+#content #query #form .multiple a.add
+{
+  background-image: url( ../../img/ico/plus-button.png );
+  float: right;
+}
+
+#content #query #form .multiple a.rem
+{
+  background-image: url( ../../img/ico/minus-button.png );
+  float: left;
+}
+
 #content #query #form #start
 {
   float: left;
@@ -111,33 +168,6 @@
   width: 77%;
 }
 
-#content #query #result #url
-{
-  margin-bottom: 10px;
-  background-image: url( ../../img/ico/ui-address-bar.png );
-  background-position: 5px 50%;
-  border: 1px solid #f0f0f0;
-  box-shadow: 1px 1px 0 #f0f0f0;
-  -moz-box-shadow: 1px 1px 0 #f0f0f0;
-  -webkit-box-shadow: 1px 1px 0 #f0f0f0;
-  color: #c0c0c0;
-  display: block;
-  overflow: hidden;
-  padding: 5px;
-  padding-left: 26px;
-  white-space: nowrap;
-}
-
-#content #query #result #url:focus,
-#content #query #result #url:hover
-{
-  border-color: #c0c0c0;
-  box-shadow: 1px 1px 0 #d8d8d8;
-  -moz-box-shadow: 1px 1px 0 #d8d8d8;
-  -webkit-box-shadow: 1px 1px 0 #d8d8d8;
-  color: #333;
-}
-
 #content #query #result #response
 {
-}
\ No newline at end of file
+}
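
The `.multiple` block added above styles repeatable query parameters: a text input floated left, with stacked add/remove icon links inside a `.buttons` holder on the right. The markup itself lives in the admin UI templates, which are not part of this hunk, so the following jQuery sketch only illustrates the shape of DOM the selectors assume; the element names and attributes are illustrative:

    // Hypothetical row matching the "#content #query #form .multiple" selectors
    var row = $(
      '<div class="multiple">' +
        '<input type="text" name="fq">' +
        '<div class="buttons">' +
          '<a class="add" href="#"><span>add</span></a>' +
          '<a class="rem" href="#"><span>remove</span></a>' +
        '</div>' +
      '</div>'
    );
    $('#content #query #form').append(row);
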
diff --git a/solr/webapp/web/css/styles/replication.css b/solr/webapp/web/css/styles/replication.css
index 3d8f021..7379504 100644
--- a/solr/webapp/web/css/styles/replication.css
+++ b/solr/webapp/web/css/styles/replication.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #replication
 {
   background-image: url( ../../img/div.gif );
@@ -493,4 +512,4 @@
 #content #replication #navigation .refresh-status span
 {
   background-image: url( ../../img/ico/arrow-circle.png );
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/schema-browser.css b/solr/webapp/web/css/styles/schema-browser.css
index 381cbe6..e43de40 100644
--- a/solr/webapp/web/css/styles/schema-browser.css
+++ b/solr/webapp/web/css/styles/schema-browser.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #schema-browser .loader
 {
   background-position: 0 50%;
@@ -556,4 +575,4 @@
 #content #schema-browser #data #field .histogram-holder li:hover dt
 {
   color: #333;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/css/styles/threads.css b/solr/webapp/web/css/styles/threads.css
index e69edae..d8aa6ab 100644
--- a/solr/webapp/web/css/styles/threads.css
+++ b/solr/webapp/web/css/styles/threads.css
@@ -1,3 +1,22 @@
+/*
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
 #content #threads .loader
 {
   background-position: 0 50%;
@@ -150,4 +169,4 @@
 #content #threads.collapsed .controls .collapse
 {
   display: none;
-}
\ No newline at end of file
+}
diff --git a/solr/webapp/web/img/ico/documents-stack.png b/solr/webapp/web/img/ico/documents-stack.png
new file mode 100644
index 0000000..a397f60
--- /dev/null
+++ b/solr/webapp/web/img/ico/documents-stack.png
Binary files differ
diff --git a/solr/webapp/web/img/ico/minus-button.png b/solr/webapp/web/img/ico/minus-button.png
new file mode 100755
index 0000000..6dc019a
--- /dev/null
+++ b/solr/webapp/web/img/ico/minus-button.png
Binary files differ
diff --git a/solr/webapp/web/js/lib/ZeroClipboard.js b/solr/webapp/web/js/lib/ZeroClipboard.js
index 92364bd..5d7671a 100644
--- a/solr/webapp/web/js/lib/ZeroClipboard.js
+++ b/solr/webapp/web/js/lib/ZeroClipboard.js
@@ -1,3 +1,28 @@
+/*
+
+The MIT License (MIT)
+Copyright (c) 2012 Jon Rohan, James M. Greene
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 // Simple Set Clipboard System
 // Author: Joseph Huckaby
 
diff --git a/solr/webapp/web/js/lib/chosen.js b/solr/webapp/web/js/lib/chosen.js
index eef29dd..bf250a2 100755
--- a/solr/webapp/web/js/lib/chosen.js
+++ b/solr/webapp/web/js/lib/chosen.js
@@ -1,3 +1,32 @@
+/*
+
+Chosen
+
+- by Patrick Filler for Harvest http://getharvest.com
+- Copyright (c) 2011-2013 by Harvest
+
+Available for use under the MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 // Chosen, a Select Box Enhancer for jQuery and Protoype
 // by Patrick Filler for Harvest, http://getharvest.com
 // 
diff --git a/solr/webapp/web/js/lib/d3.js b/solr/webapp/web/js/lib/d3.js
index c1b6caf..5015603 100755
--- a/solr/webapp/web/js/lib/d3.js
+++ b/solr/webapp/web/js/lib/d3.js
@@ -1,3 +1,34 @@
+/*
+
+Copyright (c) 2013, Michael Bostock
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* The name Michael Bostock may not be used to endorse or promote products
+  derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
 (function(){if (!Date.now) Date.now = function() {
   return +new Date;
 };
diff --git a/solr/webapp/web/js/lib/highlight.js b/solr/webapp/web/js/lib/highlight.js
index 5916aa1..527c340 100644
--- a/solr/webapp/web/js/lib/highlight.js
+++ b/solr/webapp/web/js/lib/highlight.js
@@ -1 +1,31 @@
-var hljs=new function(){function l(o){return o.replace(/&/gm,"&amp;").replace(/</gm,"&lt;").replace(/>/gm,"&gt;")}function b(p){for(var o=p.firstChild;o;o=o.nextSibling){if(o.nodeName=="CODE"){return o}if(!(o.nodeType==3&&o.nodeValue.match(/\s+/))){break}}}function h(p,o){return Array.prototype.map.call(p.childNodes,function(q){if(q.nodeType==3){return o?q.nodeValue.replace(/\n/g,""):q.nodeValue}if(q.nodeName=="BR"){return"\n"}return h(q,o)}).join("")}function a(q){var p=(q.className+" "+q.parentNode.className).split(/\s+/);p=p.map(function(r){return r.replace(/^language-/,"")});for(var o=0;o<p.length;o++){if(e[p[o]]||p[o]=="no-highlight"){return p[o]}}}function c(q){var o=[];(function p(r,s){for(var t=r.firstChild;t;t=t.nextSibling){if(t.nodeType==3){s+=t.nodeValue.length}else{if(t.nodeName=="BR"){s+=1}else{if(t.nodeType==1){o.push({event:"start",offset:s,node:t});s=p(t,s);o.push({event:"stop",offset:s,node:t})}}}}return s})(q,0);return o}function j(x,v,w){var p=0;var y="";var r=[];function t(){if(x.length&&v.length){if(x[0].offset!=v[0].offset){return(x[0].offset<v[0].offset)?x:v}else{return v[0].event=="start"?x:v}}else{return x.length?x:v}}function s(A){function z(B){return" "+B.nodeName+'="'+l(B.value)+'"'}return"<"+A.nodeName+Array.prototype.map.call(A.attributes,z).join("")+">"}while(x.length||v.length){var u=t().splice(0,1)[0];y+=l(w.substr(p,u.offset-p));p=u.offset;if(u.event=="start"){y+=s(u.node);r.push(u.node)}else{if(u.event=="stop"){var o,q=r.length;do{q--;o=r[q];y+=("</"+o.nodeName.toLowerCase()+">")}while(o!=u.node);r.splice(q,1);while(q<r.length){y+=s(r[q]);q++}}}}return y+l(w.substr(p))}function f(q){function o(s,r){return RegExp(s,"m"+(q.cI?"i":"")+(r?"g":""))}function p(y,w){if(y.compiled){return}y.compiled=true;var s=[];if(y.k){var r={};function z(A,t){t.split(" ").forEach(function(B){var C=B.split("|");r[C[0]]=[A,C[1]?Number(C[1]):1];s.push(C[0])})}y.lR=o(y.l||hljs.IR,true);if(typeof y.k=="string"){z("keyword",y.k)}else{for(var x in y.k){if(!y.k.hasOwnProperty(x)){continue}z(x,y.k[x])}}y.k=r}if(w){if(y.bWK){y.b="\\b("+s.join("|")+")\\s"}y.bR=o(y.b?y.b:"\\B|\\b");if(!y.e&&!y.eW){y.e="\\B|\\b"}if(y.e){y.eR=o(y.e)}y.tE=y.e||"";if(y.eW&&w.tE){y.tE+=(y.e?"|":"")+w.tE}}if(y.i){y.iR=o(y.i)}if(y.r===undefined){y.r=1}if(!y.c){y.c=[]}for(var v=0;v<y.c.length;v++){if(y.c[v]=="self"){y.c[v]=y}p(y.c[v],y)}if(y.starts){p(y.starts,w)}var u=[];for(var v=0;v<y.c.length;v++){u.push(y.c[v].b)}if(y.tE){u.push(y.tE)}if(y.i){u.push(y.i)}y.t=u.length?o(u.join("|"),true):{exec:function(t){return null}}}p(q)}function d(D,E){function o(r,M){for(var L=0;L<M.c.length;L++){var K=M.c[L].bR.exec(r);if(K&&K.index==0){return M.c[L]}}}function s(K,r){if(K.e&&K.eR.test(r)){return K}if(K.eW){return s(K.parent,r)}}function t(r,K){return K.i&&K.iR.test(r)}function y(L,r){var K=F.cI?r[0].toLowerCase():r[0];return L.k.hasOwnProperty(K)&&L.k[K]}function G(){var K=l(w);if(!A.k){return K}var r="";var N=0;A.lR.lastIndex=0;var L=A.lR.exec(K);while(L){r+=K.substr(N,L.index-N);var M=y(A,L);if(M){v+=M[1];r+='<span class="'+M[0]+'">'+L[0]+"</span>"}else{r+=L[0]}N=A.lR.lastIndex;L=A.lR.exec(K)}return r+K.substr(N)}function z(){if(A.sL&&!e[A.sL]){return l(w)}var r=A.sL?d(A.sL,w):g(w);if(A.r>0){v+=r.keyword_count;B+=r.r}return'<span class="'+r.language+'">'+r.value+"</span>"}function J(){return A.sL!==undefined?z():G()}function I(L,r){var K=L.cN?'<span class="'+L.cN+'">':"";if(L.rB){x+=K;w=""}else{if(L.eB){x+=l(r)+K;w=""}else{x+=K;w=r}}A=Object.create(L,{parent:{value:A}});B+=L.r}function 
C(K,r){w+=K;if(r===undefined){x+=J();return 0}var L=o(r,A);if(L){x+=J();I(L,r);return L.rB?0:r.length}var M=s(A,r);if(M){if(!(M.rE||M.eE)){w+=r}x+=J();do{if(A.cN){x+="</span>"}A=A.parent}while(A!=M.parent);if(M.eE){x+=l(r)}w="";if(M.starts){I(M.starts,"")}return M.rE?0:r.length}if(t(r,A)){throw"Illegal"}w+=r;return r.length||1}var F=e[D];f(F);var A=F;var w="";var B=0;var v=0;var x="";try{var u,q,p=0;while(true){A.t.lastIndex=p;u=A.t.exec(E);if(!u){break}q=C(E.substr(p,u.index-p),u[0]);p=u.index+q}C(E.substr(p));return{r:B,keyword_count:v,value:x,language:D}}catch(H){if(H=="Illegal"){return{r:0,keyword_count:0,value:l(E)}}else{throw H}}}function g(s){var o={keyword_count:0,r:0,value:l(s)};var q=o;for(var p in e){if(!e.hasOwnProperty(p)){continue}var r=d(p,s);r.language=p;if(r.keyword_count+r.r>q.keyword_count+q.r){q=r}if(r.keyword_count+r.r>o.keyword_count+o.r){q=o;o=r}}if(q.language){o.second_best=q}return o}function i(q,p,o){if(p){q=q.replace(/^((<[^>]+>|\t)+)/gm,function(r,v,u,t){return v.replace(/\t/g,p)})}if(o){q=q.replace(/\n/g,"<br>")}return q}function m(r,u,p){var v=h(r,p);var t=a(r);if(t=="no-highlight"){return}var w=t?d(t,v):g(v);t=w.language;var o=c(r);if(o.length){var q=document.createElement("pre");q.innerHTML=w.value;w.value=j(o,c(q),v)}w.value=i(w.value,u,p);var s=r.className;if(!s.match("(\\s|^)(language-)?"+t+"(\\s|$)")){s=s?(s+" "+t):t}r.innerHTML=w.value;r.className=s;r.result={language:t,kw:w.keyword_count,re:w.r};if(w.second_best){r.second_best={language:w.second_best.language,kw:w.second_best.keyword_count,re:w.second_best.r}}}function n(){if(n.called){return}n.called=true;Array.prototype.map.call(document.getElementsByTagName("pre"),b).filter(Boolean).forEach(function(o){m(o,hljs.tabReplace)})}function k(){window.addEventListener("DOMContentLoaded",n,false);window.addEventListener("load",n,false)}var e={};this.LANGUAGES=e;this.highlight=d;this.highlightAuto=g;this.fixMarkup=i;this.highlightBlock=m;this.initHighlighting=n;this.initHighlightingOnLoad=k;this.IR="[a-zA-Z][a-zA-Z0-9_]*";this.UIR="[a-zA-Z_][a-zA-Z0-9_]*";this.NR="\\b\\d+(\\.\\d+)?";this.CNR="(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)";this.BNR="\\b(0b[01]+)";this.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|\\.|-|-=|/|/=|:|;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~";this.BE={b:"\\\\[\\s\\S]",r:0};this.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[this.BE],r:0};this.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[this.BE],r:0};this.CLCM={cN:"comment",b:"//",e:"$"};this.CBLCLM={cN:"comment",b:"/\\*",e:"\\*/"};this.HCM={cN:"comment",b:"#",e:"$"};this.NM={cN:"number",b:this.NR,r:0};this.CNM={cN:"number",b:this.CNR,r:0};this.BNM={cN:"number",b:this.BNR,r:0};this.inherit=function(q,r){var o={};for(var p in q){o[p]=q[p]}if(r){for(var p in r){o[p]=r[p]}}return o}}();hljs.LANGUAGES.ruby=function(e){var a="[a-zA-Z_][a-zA-Z0-9_]*(\\!|\\?)?";var j="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?";var g={keyword:"and false then defined module in return redo if BEGIN retry end for true self when next until do begin unless END rescue nil else break undef not super class case require yield alias while ensure elsif or include"};var c={cN:"yardoctag",b:"@[A-Za-z]+"};var k=[{cN:"comment",b:"#",e:"$",c:[c]},{cN:"comment",b:"^\\=begin",e:"^\\=end",c:[c],r:10},{cN:"comment",b:"^__END__",e:"\\n$"}];var d={cN:"subst",b:"#\\{",e:"}",l:a,k:g};var i=[e.BE,d];var 
b=[{cN:"string",b:"'",e:"'",c:i,r:0},{cN:"string",b:'"',e:'"',c:i,r:0},{cN:"string",b:"%[qw]?\\(",e:"\\)",c:i},{cN:"string",b:"%[qw]?\\[",e:"\\]",c:i},{cN:"string",b:"%[qw]?{",e:"}",c:i},{cN:"string",b:"%[qw]?<",e:">",c:i,r:10},{cN:"string",b:"%[qw]?/",e:"/",c:i,r:10},{cN:"string",b:"%[qw]?%",e:"%",c:i,r:10},{cN:"string",b:"%[qw]?-",e:"-",c:i,r:10},{cN:"string",b:"%[qw]?\\|",e:"\\|",c:i,r:10}];var h={cN:"function",bWK:true,e:" |$|;",k:"def",c:[{cN:"title",b:j,l:a,k:g},{cN:"params",b:"\\(",e:"\\)",l:a,k:g}].concat(k)};var f=k.concat(b.concat([{cN:"class",bWK:true,e:"$|;",k:"class module",c:[{cN:"title",b:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?",r:0},{cN:"inheritance",b:"<\\s*",c:[{cN:"parent",b:"("+e.IR+"::)?"+e.IR}]}].concat(k)},h,{cN:"constant",b:"(::)?(\\b[A-Z]\\w*(::)?)+",r:0},{cN:"symbol",b:":",c:b.concat([{b:j}]),r:0},{cN:"symbol",b:a+":",r:0},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{cN:"number",b:"\\?\\w"},{cN:"variable",b:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{b:"("+e.RSR+")\\s*",c:k.concat([{cN:"regexp",b:"/",e:"/[a-z]*",i:"\\n",c:[e.BE,d]}]),r:0}]));d.c=f;h.c[1].c=f;return{l:a,k:g,c:f}}(hljs);hljs.LANGUAGES.diff=function(a){return{c:[{cN:"chunk",b:"^\\@\\@ +\\-\\d+,\\d+ +\\+\\d+,\\d+ +\\@\\@$",r:10},{cN:"chunk",b:"^\\*\\*\\* +\\d+,\\d+ +\\*\\*\\*\\*$",r:10},{cN:"chunk",b:"^\\-\\-\\- +\\d+,\\d+ +\\-\\-\\-\\-$",r:10},{cN:"header",b:"Index: ",e:"$"},{cN:"header",b:"=====",e:"=====$"},{cN:"header",b:"^\\-\\-\\-",e:"$"},{cN:"header",b:"^\\*{3} ",e:"$"},{cN:"header",b:"^\\+\\+\\+",e:"$"},{cN:"header",b:"\\*{5}",e:"\\*{5}$"},{cN:"addition",b:"^\\+",e:"$"},{cN:"deletion",b:"^\\-",e:"$"},{cN:"change",b:"^\\!",e:"$"}]}}(hljs);hljs.LANGUAGES.javascript=function(a){return{k:{keyword:"in if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const",literal:"true false null undefined NaN Infinity"},c:[a.ASM,a.QSM,a.CLCM,a.CBLCLM,a.CNM,{b:"("+a.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[a.CLCM,a.CBLCLM,{cN:"regexp",b:"/",e:"/[gim]*",i:"\\n",c:[{b:"\\\\/"}]},{b:"<",e:">;",sL:"xml"}],r:0},{cN:"function",bWK:true,e:"{",k:"function",c:[{cN:"title",b:"[A-Za-z$_][0-9A-Za-z$_]*"},{cN:"params",b:"\\(",e:"\\)",c:[a.CLCM,a.CBLCLM],i:"[\"'\\(]"}],i:"\\[|%"}]}}(hljs);hljs.LANGUAGES.xml=function(a){var c="[A-Za-z0-9\\._:-]+";var b={eW:true,c:[{cN:"attribute",b:c,r:0},{b:'="',rB:true,e:'"',c:[{cN:"value",b:'"',eW:true}]},{b:"='",rB:true,e:"'",c:[{cN:"value",b:"'",eW:true}]},{b:"=",c:[{cN:"value",b:"[^\\s/>]+"}]}]};return{cI:true,c:[{cN:"pi",b:"<\\?",e:"\\?>",r:10},{cN:"doctype",b:"<!DOCTYPE",e:">",r:10,c:[{b:"\\[",e:"\\]"}]},{cN:"comment",b:"<!--",e:"-->",r:10},{cN:"cdata",b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"tag",b:"<style(?=\\s|>|$)",e:">",k:{title:"style"},c:[b],starts:{e:"</style>",rE:true,sL:"css"}},{cN:"tag",b:"<script(?=\\s|>|$)",e:">",k:{title:"script"},c:[b],starts:{e:"<\/script>",rE:true,sL:"javascript"}},{b:"<%",e:"%>",sL:"vbscript"},{cN:"tag",b:"</?",e:"/?>",c:[{cN:"title",b:"[^ />]+"},b]}]}}(hljs);hljs.LANGUAGES.php=function(a){var e={cN:"variable",b:"\\$+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*"};var b=[a.inherit(a.ASM,{i:null}),a.inherit(a.QSM,{i:null}),{cN:"string",b:'b"',e:'"',c:[a.BE]},{cN:"string",b:"b'",e:"'",c:[a.BE]}];var c=[a.BNM,a.CNM];var d={cN:"title",b:a.UIR};return{cI:true,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var 
while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return implements parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception php_user_filter default die require __FUNCTION__ enddeclare final try this switch continue endfor endif declare unset true false namespace trait goto instanceof insteadof __DIR__ __NAMESPACE__ __halt_compiler",c:[a.CLCM,a.HCM,{cN:"comment",b:"/\\*",e:"\\*/",c:[{cN:"phpdoc",b:"\\s@[A-Za-z]+"}]},{cN:"comment",eB:true,b:"__halt_compiler.+?;",eW:true},{cN:"string",b:"<<<['\"]?\\w+['\"]?$",e:"^\\w+;",c:[a.BE]},{cN:"preprocessor",b:"<\\?php",r:10},{cN:"preprocessor",b:"\\?>"},e,{cN:"function",bWK:true,e:"{",k:"function",i:"\\$|\\[|%",c:[d,{cN:"params",b:"\\(",e:"\\)",c:["self",e,a.CBLCLM].concat(b).concat(c)}]},{cN:"class",bWK:true,e:"{",k:"class",i:"[:\\(\\$]",c:[{bWK:true,eW:true,k:"extends",c:[d]},d]},{b:"=>"}].concat(b).concat(c)}}(hljs);hljs.LANGUAGES.python=function(a){var f={cN:"prompt",b:"^(>>>|\\.\\.\\.) "};var c=[{cN:"string",b:"(u|b)?r?'''",e:"'''",c:[f],r:10},{cN:"string",b:'(u|b)?r?"""',e:'"""',c:[f],r:10},{cN:"string",b:"(u|r|ur)'",e:"'",c:[a.BE],r:10},{cN:"string",b:'(u|r|ur)"',e:'"',c:[a.BE],r:10},{cN:"string",b:"(b|br)'",e:"'",c:[a.BE]},{cN:"string",b:'(b|br)"',e:'"',c:[a.BE]}].concat([a.ASM,a.QSM]);var e={cN:"title",b:a.UIR};var d={cN:"params",b:"\\(",e:"\\)",c:["self",a.CNM,f].concat(c)};var b={bWK:true,e:":",i:"[${=;\\n]",c:[e,d],r:10};return{k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda nonlocal|10",built_in:"None True False Ellipsis NotImplemented"},i:"(</|->|\\?)",c:c.concat([f,a.HCM,a.inherit(b,{cN:"function",k:"def"}),a.inherit(b,{cN:"class",k:"class"}),a.CNM,{cN:"decorator",b:"@",e:"$"},{b:"\\b(print|exec)\\("}])}}(hljs);hljs.LANGUAGES.json=function(a){var e={literal:"true false null"};var d=[a.QSM,a.CNM];var c={cN:"value",e:",",eW:true,eE:true,c:d,k:e};var b={b:"{",e:"}",c:[{cN:"attribute",b:'\\s*"',e:'"\\s*:\\s*',eB:true,eE:true,c:[a.BE],i:"\\n",starts:c}],i:"\\S"};var f={b:"\\[",e:"\\]",c:[a.inherit(c,{cN:null})],i:"\\S"};d.splice(d.length,0,b,f);return{c:d,k:e,i:"\\S"}}(hljs);
\ No newline at end of file
+/*
+
+Copyright (c) 2006, Ivan Sagalaev
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of highlight.js nor the names of its contributors 
+      may be used to endorse or promote products derived from this software 
+      without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+var hljs=new function(){function l(o){return o.replace(/&/gm,"&amp;").replace(/</gm,"&lt;").replace(/>/gm,"&gt;")}function b(p){for(var o=p.firstChild;o;o=o.nextSibling){if(o.nodeName=="CODE"){return o}if(!(o.nodeType==3&&o.nodeValue.match(/\s+/))){break}}}function h(p,o){return Array.prototype.map.call(p.childNodes,function(q){if(q.nodeType==3){return o?q.nodeValue.replace(/\n/g,""):q.nodeValue}if(q.nodeName=="BR"){return"\n"}return h(q,o)}).join("")}function a(q){var p=(q.className+" "+q.parentNode.className).split(/\s+/);p=p.map(function(r){return r.replace(/^language-/,"")});for(var o=0;o<p.length;o++){if(e[p[o]]||p[o]=="no-highlight"){return p[o]}}}function c(q){var o=[];(function p(r,s){for(var t=r.firstChild;t;t=t.nextSibling){if(t.nodeType==3){s+=t.nodeValue.length}else{if(t.nodeName=="BR"){s+=1}else{if(t.nodeType==1){o.push({event:"start",offset:s,node:t});s=p(t,s);o.push({event:"stop",offset:s,node:t})}}}}return s})(q,0);return o}function j(x,v,w){var p=0;var y="";var r=[];function t(){if(x.length&&v.length){if(x[0].offset!=v[0].offset){return(x[0].offset<v[0].offset)?x:v}else{return v[0].event=="start"?x:v}}else{return x.length?x:v}}function s(A){function z(B){return" "+B.nodeName+'="'+l(B.value)+'"'}return"<"+A.nodeName+Array.prototype.map.call(A.attributes,z).join("")+">"}while(x.length||v.length){var u=t().splice(0,1)[0];y+=l(w.substr(p,u.offset-p));p=u.offset;if(u.event=="start"){y+=s(u.node);r.push(u.node)}else{if(u.event=="stop"){var o,q=r.length;do{q--;o=r[q];y+=("</"+o.nodeName.toLowerCase()+">")}while(o!=u.node);r.splice(q,1);while(q<r.length){y+=s(r[q]);q++}}}}return y+l(w.substr(p))}function f(q){function o(s,r){return RegExp(s,"m"+(q.cI?"i":"")+(r?"g":""))}function p(y,w){if(y.compiled){return}y.compiled=true;var s=[];if(y.k){var r={};function z(A,t){t.split(" ").forEach(function(B){var C=B.split("|");r[C[0]]=[A,C[1]?Number(C[1]):1];s.push(C[0])})}y.lR=o(y.l||hljs.IR,true);if(typeof y.k=="string"){z("keyword",y.k)}else{for(var x in y.k){if(!y.k.hasOwnProperty(x)){continue}z(x,y.k[x])}}y.k=r}if(w){if(y.bWK){y.b="\\b("+s.join("|")+")\\s"}y.bR=o(y.b?y.b:"\\B|\\b");if(!y.e&&!y.eW){y.e="\\B|\\b"}if(y.e){y.eR=o(y.e)}y.tE=y.e||"";if(y.eW&&w.tE){y.tE+=(y.e?"|":"")+w.tE}}if(y.i){y.iR=o(y.i)}if(y.r===undefined){y.r=1}if(!y.c){y.c=[]}for(var v=0;v<y.c.length;v++){if(y.c[v]=="self"){y.c[v]=y}p(y.c[v],y)}if(y.starts){p(y.starts,w)}var u=[];for(var v=0;v<y.c.length;v++){u.push(y.c[v].b)}if(y.tE){u.push(y.tE)}if(y.i){u.push(y.i)}y.t=u.length?o(u.join("|"),true):{exec:function(t){return null}}}p(q)}function d(D,E){function o(r,M){for(var L=0;L<M.c.length;L++){var K=M.c[L].bR.exec(r);if(K&&K.index==0){return M.c[L]}}}function s(K,r){if(K.e&&K.eR.test(r)){return K}if(K.eW){return s(K.parent,r)}}function t(r,K){return K.i&&K.iR.test(r)}function y(L,r){var K=F.cI?r[0].toLowerCase():r[0];return L.k.hasOwnProperty(K)&&L.k[K]}function G(){var K=l(w);if(!A.k){return K}var r="";var N=0;A.lR.lastIndex=0;var L=A.lR.exec(K);while(L){r+=K.substr(N,L.index-N);var M=y(A,L);if(M){v+=M[1];r+='<span class="'+M[0]+'">'+L[0]+"</span>"}else{r+=L[0]}N=A.lR.lastIndex;L=A.lR.exec(K)}return r+K.substr(N)}function z(){if(A.sL&&!e[A.sL]){return l(w)}var r=A.sL?d(A.sL,w):g(w);if(A.r>0){v+=r.keyword_count;B+=r.r}return'<span class="'+r.language+'">'+r.value+"</span>"}function J(){return A.sL!==undefined?z():G()}function I(L,r){var K=L.cN?'<span class="'+L.cN+'">':"";if(L.rB){x+=K;w=""}else{if(L.eB){x+=l(r)+K;w=""}else{x+=K;w=r}}A=Object.create(L,{parent:{value:A}});B+=L.r}function 
C(K,r){w+=K;if(r===undefined){x+=J();return 0}var L=o(r,A);if(L){x+=J();I(L,r);return L.rB?0:r.length}var M=s(A,r);if(M){if(!(M.rE||M.eE)){w+=r}x+=J();do{if(A.cN){x+="</span>"}A=A.parent}while(A!=M.parent);if(M.eE){x+=l(r)}w="";if(M.starts){I(M.starts,"")}return M.rE?0:r.length}if(t(r,A)){throw"Illegal"}w+=r;return r.length||1}var F=e[D];f(F);var A=F;var w="";var B=0;var v=0;var x="";try{var u,q,p=0;while(true){A.t.lastIndex=p;u=A.t.exec(E);if(!u){break}q=C(E.substr(p,u.index-p),u[0]);p=u.index+q}C(E.substr(p));return{r:B,keyword_count:v,value:x,language:D}}catch(H){if(H=="Illegal"){return{r:0,keyword_count:0,value:l(E)}}else{throw H}}}function g(s){var o={keyword_count:0,r:0,value:l(s)};var q=o;for(var p in e){if(!e.hasOwnProperty(p)){continue}var r=d(p,s);r.language=p;if(r.keyword_count+r.r>q.keyword_count+q.r){q=r}if(r.keyword_count+r.r>o.keyword_count+o.r){q=o;o=r}}if(q.language){o.second_best=q}return o}function i(q,p,o){if(p){q=q.replace(/^((<[^>]+>|\t)+)/gm,function(r,v,u,t){return v.replace(/\t/g,p)})}if(o){q=q.replace(/\n/g,"<br>")}return q}function m(r,u,p){var v=h(r,p);var t=a(r);if(t=="no-highlight"){return}var w=t?d(t,v):g(v);t=w.language;var o=c(r);if(o.length){var q=document.createElement("pre");q.innerHTML=w.value;w.value=j(o,c(q),v)}w.value=i(w.value,u,p);var s=r.className;if(!s.match("(\\s|^)(language-)?"+t+"(\\s|$)")){s=s?(s+" "+t):t}r.innerHTML=w.value;r.className=s;r.result={language:t,kw:w.keyword_count,re:w.r};if(w.second_best){r.second_best={language:w.second_best.language,kw:w.second_best.keyword_count,re:w.second_best.r}}}function n(){if(n.called){return}n.called=true;Array.prototype.map.call(document.getElementsByTagName("pre"),b).filter(Boolean).forEach(function(o){m(o,hljs.tabReplace)})}function k(){window.addEventListener("DOMContentLoaded",n,false);window.addEventListener("load",n,false)}var e={};this.LANGUAGES=e;this.highlight=d;this.highlightAuto=g;this.fixMarkup=i;this.highlightBlock=m;this.initHighlighting=n;this.initHighlightingOnLoad=k;this.IR="[a-zA-Z][a-zA-Z0-9_]*";this.UIR="[a-zA-Z_][a-zA-Z0-9_]*";this.NR="\\b\\d+(\\.\\d+)?";this.CNR="(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)";this.BNR="\\b(0b[01]+)";this.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|\\.|-|-=|/|/=|:|;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~";this.BE={b:"\\\\[\\s\\S]",r:0};this.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[this.BE],r:0};this.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[this.BE],r:0};this.CLCM={cN:"comment",b:"//",e:"$"};this.CBLCLM={cN:"comment",b:"/\\*",e:"\\*/"};this.HCM={cN:"comment",b:"#",e:"$"};this.NM={cN:"number",b:this.NR,r:0};this.CNM={cN:"number",b:this.CNR,r:0};this.BNM={cN:"number",b:this.BNR,r:0};this.inherit=function(q,r){var o={};for(var p in q){o[p]=q[p]}if(r){for(var p in r){o[p]=r[p]}}return o}}();hljs.LANGUAGES.ruby=function(e){var a="[a-zA-Z_][a-zA-Z0-9_]*(\\!|\\?)?";var j="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?";var g={keyword:"and false then defined module in return redo if BEGIN retry end for true self when next until do begin unless END rescue nil else break undef not super class case require yield alias while ensure elsif or include"};var c={cN:"yardoctag",b:"@[A-Za-z]+"};var k=[{cN:"comment",b:"#",e:"$",c:[c]},{cN:"comment",b:"^\\=begin",e:"^\\=end",c:[c],r:10},{cN:"comment",b:"^__END__",e:"\\n$"}];var d={cN:"subst",b:"#\\{",e:"}",l:a,k:g};var i=[e.BE,d];var 
b=[{cN:"string",b:"'",e:"'",c:i,r:0},{cN:"string",b:'"',e:'"',c:i,r:0},{cN:"string",b:"%[qw]?\\(",e:"\\)",c:i},{cN:"string",b:"%[qw]?\\[",e:"\\]",c:i},{cN:"string",b:"%[qw]?{",e:"}",c:i},{cN:"string",b:"%[qw]?<",e:">",c:i,r:10},{cN:"string",b:"%[qw]?/",e:"/",c:i,r:10},{cN:"string",b:"%[qw]?%",e:"%",c:i,r:10},{cN:"string",b:"%[qw]?-",e:"-",c:i,r:10},{cN:"string",b:"%[qw]?\\|",e:"\\|",c:i,r:10}];var h={cN:"function",bWK:true,e:" |$|;",k:"def",c:[{cN:"title",b:j,l:a,k:g},{cN:"params",b:"\\(",e:"\\)",l:a,k:g}].concat(k)};var f=k.concat(b.concat([{cN:"class",bWK:true,e:"$|;",k:"class module",c:[{cN:"title",b:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?",r:0},{cN:"inheritance",b:"<\\s*",c:[{cN:"parent",b:"("+e.IR+"::)?"+e.IR}]}].concat(k)},h,{cN:"constant",b:"(::)?(\\b[A-Z]\\w*(::)?)+",r:0},{cN:"symbol",b:":",c:b.concat([{b:j}]),r:0},{cN:"symbol",b:a+":",r:0},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{cN:"number",b:"\\?\\w"},{cN:"variable",b:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{b:"("+e.RSR+")\\s*",c:k.concat([{cN:"regexp",b:"/",e:"/[a-z]*",i:"\\n",c:[e.BE,d]}]),r:0}]));d.c=f;h.c[1].c=f;return{l:a,k:g,c:f}}(hljs);hljs.LANGUAGES.diff=function(a){return{c:[{cN:"chunk",b:"^\\@\\@ +\\-\\d+,\\d+ +\\+\\d+,\\d+ +\\@\\@$",r:10},{cN:"chunk",b:"^\\*\\*\\* +\\d+,\\d+ +\\*\\*\\*\\*$",r:10},{cN:"chunk",b:"^\\-\\-\\- +\\d+,\\d+ +\\-\\-\\-\\-$",r:10},{cN:"header",b:"Index: ",e:"$"},{cN:"header",b:"=====",e:"=====$"},{cN:"header",b:"^\\-\\-\\-",e:"$"},{cN:"header",b:"^\\*{3} ",e:"$"},{cN:"header",b:"^\\+\\+\\+",e:"$"},{cN:"header",b:"\\*{5}",e:"\\*{5}$"},{cN:"addition",b:"^\\+",e:"$"},{cN:"deletion",b:"^\\-",e:"$"},{cN:"change",b:"^\\!",e:"$"}]}}(hljs);hljs.LANGUAGES.javascript=function(a){return{k:{keyword:"in if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const",literal:"true false null undefined NaN Infinity"},c:[a.ASM,a.QSM,a.CLCM,a.CBLCLM,a.CNM,{b:"("+a.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[a.CLCM,a.CBLCLM,{cN:"regexp",b:"/",e:"/[gim]*",i:"\\n",c:[{b:"\\\\/"}]},{b:"<",e:">;",sL:"xml"}],r:0},{cN:"function",bWK:true,e:"{",k:"function",c:[{cN:"title",b:"[A-Za-z$_][0-9A-Za-z$_]*"},{cN:"params",b:"\\(",e:"\\)",c:[a.CLCM,a.CBLCLM],i:"[\"'\\(]"}],i:"\\[|%"}]}}(hljs);hljs.LANGUAGES.xml=function(a){var c="[A-Za-z0-9\\._:-]+";var b={eW:true,c:[{cN:"attribute",b:c,r:0},{b:'="',rB:true,e:'"',c:[{cN:"value",b:'"',eW:true}]},{b:"='",rB:true,e:"'",c:[{cN:"value",b:"'",eW:true}]},{b:"=",c:[{cN:"value",b:"[^\\s/>]+"}]}]};return{cI:true,c:[{cN:"pi",b:"<\\?",e:"\\?>",r:10},{cN:"doctype",b:"<!DOCTYPE",e:">",r:10,c:[{b:"\\[",e:"\\]"}]},{cN:"comment",b:"<!--",e:"-->",r:10},{cN:"cdata",b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"tag",b:"<style(?=\\s|>|$)",e:">",k:{title:"style"},c:[b],starts:{e:"</style>",rE:true,sL:"css"}},{cN:"tag",b:"<script(?=\\s|>|$)",e:">",k:{title:"script"},c:[b],starts:{e:"<\/script>",rE:true,sL:"javascript"}},{b:"<%",e:"%>",sL:"vbscript"},{cN:"tag",b:"</?",e:"/?>",c:[{cN:"title",b:"[^ />]+"},b]}]}}(hljs);hljs.LANGUAGES.php=function(a){var e={cN:"variable",b:"\\$+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*"};var b=[a.inherit(a.ASM,{i:null}),a.inherit(a.QSM,{i:null}),{cN:"string",b:'b"',e:'"',c:[a.BE]},{cN:"string",b:"b'",e:"'",c:[a.BE]}];var c=[a.BNM,a.CNM];var d={cN:"title",b:a.UIR};return{cI:true,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var 
while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return implements parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception php_user_filter default die require __FUNCTION__ enddeclare final try this switch continue endfor endif declare unset true false namespace trait goto instanceof insteadof __DIR__ __NAMESPACE__ __halt_compiler",c:[a.CLCM,a.HCM,{cN:"comment",b:"/\\*",e:"\\*/",c:[{cN:"phpdoc",b:"\\s@[A-Za-z]+"}]},{cN:"comment",eB:true,b:"__halt_compiler.+?;",eW:true},{cN:"string",b:"<<<['\"]?\\w+['\"]?$",e:"^\\w+;",c:[a.BE]},{cN:"preprocessor",b:"<\\?php",r:10},{cN:"preprocessor",b:"\\?>"},e,{cN:"function",bWK:true,e:"{",k:"function",i:"\\$|\\[|%",c:[d,{cN:"params",b:"\\(",e:"\\)",c:["self",e,a.CBLCLM].concat(b).concat(c)}]},{cN:"class",bWK:true,e:"{",k:"class",i:"[:\\(\\$]",c:[{bWK:true,eW:true,k:"extends",c:[d]},d]},{b:"=>"}].concat(b).concat(c)}}(hljs);hljs.LANGUAGES.python=function(a){var f={cN:"prompt",b:"^(>>>|\\.\\.\\.) "};var c=[{cN:"string",b:"(u|b)?r?'''",e:"'''",c:[f],r:10},{cN:"string",b:'(u|b)?r?"""',e:'"""',c:[f],r:10},{cN:"string",b:"(u|r|ur)'",e:"'",c:[a.BE],r:10},{cN:"string",b:'(u|r|ur)"',e:'"',c:[a.BE],r:10},{cN:"string",b:"(b|br)'",e:"'",c:[a.BE]},{cN:"string",b:'(b|br)"',e:'"',c:[a.BE]}].concat([a.ASM,a.QSM]);var e={cN:"title",b:a.UIR};var d={cN:"params",b:"\\(",e:"\\)",c:["self",a.CNM,f].concat(c)};var b={bWK:true,e:":",i:"[${=;\\n]",c:[e,d],r:10};return{k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda nonlocal|10",built_in:"None True False Ellipsis NotImplemented"},i:"(</|->|\\?)",c:c.concat([f,a.HCM,a.inherit(b,{cN:"function",k:"def"}),a.inherit(b,{cN:"class",k:"class"}),a.CNM,{cN:"decorator",b:"@",e:"$"},{b:"\\b(print|exec)\\("}])}}(hljs);hljs.LANGUAGES.json=function(a){var e={literal:"true false null"};var d=[a.QSM,a.CNM];var c={cN:"value",e:",",eW:true,eE:true,c:d,k:e};var b={b:"{",e:"}",c:[{cN:"attribute",b:'\\s*"',e:'"\\s*:\\s*',eB:true,eE:true,c:[a.BE],i:"\\n",starts:c}],i:"\\S"};var f={b:"\\[",e:"\\]",c:[a.inherit(c,{cN:null})],i:"\\S"};d.splice(d.length,0,b,f);return{c:d,k:e,i:"\\S"}}(hljs);
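
Apart from gaining the BSD license header, the minified highlight.js bundle is re-emitted unchanged; it still installs a global `hljs` object exposing `highlightBlock`, `initHighlighting`, `initHighlightingOnLoad` and the `LANGUAGES` table, as visible in the minified source above. A minimal usage sketch against that API (the element id is made up):

    // Highlight every <pre><code> block once the page has loaded.
    hljs.initHighlightingOnLoad();

    // Or highlight one element on demand, passing the optional tab replacement:
    var node = document.getElementById('response'); // hypothetical element id
    if (node) {
      hljs.highlightBlock(node, hljs.tabReplace);
    }
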
diff --git a/solr/webapp/web/js/lib/jquery-1.7.2.min.js b/solr/webapp/web/js/lib/jquery-1.7.2.min.js
index c941a5f..438ff79 100644
--- a/solr/webapp/web/js/lib/jquery-1.7.2.min.js
+++ b/solr/webapp/web/js/lib/jquery-1.7.2.min.js
@@ -1,3 +1,27 @@
+/*
+
+Copyright (c) 2011 John Resig, http://jquery.com/
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /*!
  * jQuery JavaScript Library v1.4.3
  * http://jquery.com/
diff --git a/solr/webapp/web/js/lib/jquery.ajaxfileupload.js b/solr/webapp/web/js/lib/jquery.ajaxfileupload.js
new file mode 100644
index 0000000..eb53853
--- /dev/null
+++ b/solr/webapp/web/js/lib/jquery.ajaxfileupload.js
@@ -0,0 +1,182 @@
+/*
+* Copyright (c) 2011 Jordan Feldstein
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Original code from: https://github.com/jfeldstein/jQuery.AjaxFileUpload.js https://github.com/jfeldstein/jQuery.AjaxFileUpload.js/commit/9dd56b4161cbed138287d3ae29a476bb59eb5fc4
+// All modifications are BSD licensed
+// GSI: Modifications made to support immediate upload
+/*
+ //
+ //  - Ajaxifies an individual <input type="file">
+ //  - Files are sandboxed. Doesn't matter how many, or where they are, on the page.
+ //  - Allows for extra parameters to be included with the file
+ //  - onStart callback can cancel the upload by returning false
+ */
+
+
+(function ($) {
+  $.fn.ajaxfileupload = function (options) {
+    var settings = {
+      params: {},
+      action: '',
+      onStart: function () {
+        console.log('starting upload');
+        console.log(this);
+      },
+      onComplete: function (response) {
+        console.log('got response: ');
+        console.log(response);
+        console.log(this);
+      },
+      onCancel: function () {
+        console.log('cancelling: ');
+        console.log(this);
+      },
+      validate_extensions: true,
+      valid_extensions: ['gif', 'png', 'jpg', 'jpeg'],
+      submit_button: null,
+      upload_now: false
+    };
+
+    var uploading_file = false;
+
+    if (options) {
+      $.extend(settings, options);
+    }
+
+
+    // 'this' is a jQuery collection of one or more (hopefully)
+    //  file input elements, but this is not checked yet
+    return this.each(function () {
+      var $element = $(this);
+      /*
+       // Internal handler that tries to parse the response
+       //  and clean up after ourselves.
+       */
+      var handleResponse = function (loadedFrame, element) {
+        var response, responseStr = loadedFrame.contentWindow.document.body.innerHTML;
+        try {
+          //response = $.parseJSON($.trim(responseStr));
+          response = JSON.parse(responseStr);
+        } catch (e) {
+          response = responseStr;
+        }
+
+        // Tear-down the wrapper form
+        element.siblings().remove();
+        element.unwrap();
+
+        uploading_file = false;
+
+        // Pass back to the user
+        settings.onComplete.apply(element, [response, settings.params]);
+      };
+      /*
+       // Wraps element in a <form> tag, and inserts hidden inputs for each
+       //  key:value pair in settings.params so they can be sent along with
+       //  the upload. Then, creates an iframe that the whole thing is
+       //  uploaded through.
+       */
+      var wrapElement = function (element) {
+        // Create an iframe to submit through, using a semi-unique ID
+        var frame_id = 'ajaxUploader-iframe-' + Math.round(new Date().getTime() / 1000);
+        $('body').after('<iframe width="0" height="0" style="display:none;" name="' + frame_id + '" id="' + frame_id + '"/>');
+        $('#' + frame_id).load(function () {
+          handleResponse(this, element);
+        });
+        console.log("settings.action: " + settings.action);
+        // Wrap it in a form
+        element.wrap(function () {
+          return '<form action="' + settings.action + '" method="POST" enctype="multipart/form-data" target="' + frame_id + '" />';
+        })
+          // Insert <input type='hidden'>'s for each param
+            .before(function () {
+              var key, html = '';
+              for (key in settings.params) {
+                var paramVal = settings.params[key];
+                if (typeof paramVal === 'function') {
+                  paramVal = paramVal();
+                }
+                html += '<input type="hidden" name="' + key + '" value="' + paramVal + '" />';
+              }
+              return html;
+            });
+      };
+
+      var upload_file = function () {
+        if ($element.val() == '') return settings.onCancel.apply($element, [settings.params]);
+
+        // make sure extension is valid
+        var ext = $element.val().split('.').pop().toLowerCase();
+        if (true === settings.validate_extensions && $.inArray(ext, settings.valid_extensions) == -1) {
+          // Pass back to the user
+          settings.onComplete.apply($element, [
+            {status: false, message: 'The selected file type is invalid. File must be ' + settings.valid_extensions.join(', ') + '.'},
+            settings.params
+          ]);
+        } else {
+          uploading_file = true;
+
+          // Creates the form, extra inputs and iframe used to
+          //  submit / upload the file
+          wrapElement($element);
+
+          // Call user-supplied (or default) onStart(), setting
+          //  its 'this' context to the file DOM element
+          var ret = settings.onStart.apply($element, [settings.params]);
+
+          // let onStart have the option to cancel the upload
+          if (ret !== false) {
+            $element.parent('form').submit(function (e) {
+              e.stopPropagation();
+            }).submit();
+          }
+        }
+      };
+      if (settings.upload_now) {
+        if (!uploading_file) {
+          console.log("uploading now");
+          upload_file();
+        }
+      }
+      // Skip elements that are already set up. May replace this
+      //  with uninit() later, to allow updating the settings
+      if ($element.data('ajaxUploader-setup') === true) return;
+
+      $element.change(function () {
+        // since a new file was selected, reset the marker
+        uploading_file = false;
+
+        // only upload the file from here if we haven't assigned a submit button
+        if (settings.submit_button == null) {
+          console.log("uploading");
+          upload_file();
+        }
+      });
+
+      if (settings.submit_button == null) {
+        // do nothing
+      } else {
+        settings.submit_button.click(function () {
+          console.log("uploading: " + uploading_file);
+          // only attempt to upload file if we're not uploading
+          if (!uploading_file) {
+            upload_file();
+          }
+        });
+      }
+
+
+      // Mark this element as setup
+      $element.data('ajaxUploader-setup', true);
+
+
+    });
+  }
+})(jQuery);
\ No newline at end of file
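
The new jquery.ajaxfileupload.js plugin wraps a file input in a hidden <form> that posts into a throwaway <iframe>, so uploads work without XHR2 support. A wiring sketch based on the options and callbacks defined above — the selector and handler URL are illustrative, not taken from this patch:

    // Hypothetical hookup for a file input on the new Documents screen.
    $('#file-upload').ajaxfileupload({
      action: '/solr/collection1/update/extract', // made-up handler URL
      validate_extensions: false,                 // default list only allows image types
      params: {
        commit: 'true',
        wt: function () { return 'json'; }        // values may be functions, resolved at submit time
      },
      onStart: function (params) {
        // returning false here cancels the upload
        return window.confirm('Upload this file?');
      },
      onComplete: function (response, params) {
        console.log('server said:', response);
      }
    });
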
diff --git a/solr/webapp/web/js/lib/jquery.autogrow.js b/solr/webapp/web/js/lib/jquery.autogrow.js
deleted file mode 100755
index 1a41a62..0000000
--- a/solr/webapp/web/js/lib/jquery.autogrow.js
+++ /dev/null
@@ -1,132 +0,0 @@
-/* 
- * Auto Expanding Text Area (1.2.2)
- * by Chrys Bader (www.chrysbader.com)
- * chrysb@gmail.com
- *
- * Special thanks to:
- * Jake Chapa - jake@hybridstudio.com
- * John Resig - jeresig@gmail.com
- *
- * Copyright (c) 2008 Chrys Bader (www.chrysbader.com)
- * Licensed under the GPL (GPL-LICENSE.txt) license. 
- *
- *
- * NOTE: This script requires jQuery to work.  Download jQuery at www.jquery.com
- *
- */
- 
-(function(jQuery) {
-		  
-	var self = null;
- 
-	jQuery.fn.autogrow = function(o)
-	{	
-		return this.each(function() {
-			new jQuery.autogrow(this, o);
-		});
-	};
-	
-
-    /**
-     * The autogrow object.
-     *
-     * @constructor
-     * @name jQuery.autogrow
-     * @param Object e The textarea to create the autogrow for.
-     * @param Hash o A set of key/value pairs to set as configuration properties.
-     * @cat Plugins/autogrow
-     */
-	
-	jQuery.autogrow = function (e, o)
-	{
-		this.options		  	= o || {};
-		this.dummy			  	= null;
-		this.interval	 	  	= null;
-		this.line_height	  	= this.options.lineHeight || parseInt(jQuery(e).css('line-height'), 10);
-		this.min_height		  	= this.options.minHeight || parseInt(jQuery(e).css('min-height'), 10);
-		this.max_height		  	= this.options.maxHeight || parseInt(jQuery(e).css('max-height'), 10);
-		this.textarea		  	= jQuery(e);
-		
-		if(this.line_height == NaN)
-		  this.line_height = 0;
-		
-		// Only one textarea activated at a time, the one being used
-		this.init();
-	};
-	
-	jQuery.autogrow.fn = jQuery.autogrow.prototype = {
-    autogrow: '1.2.2'
-  };
-	
- 	jQuery.autogrow.fn.extend = jQuery.autogrow.extend = jQuery.extend;
-	
-	jQuery.autogrow.fn.extend({
-						 
-		init: function() {			
-			var self = this;			
-			this.textarea.css({overflow: 'hidden', display: 'block'});
-			this.textarea.bind('focus', function() { self.startExpand(); } ).bind('blur', function() { self.stopExpand(); });
-			this.checkExpand();	
-		},
-						 
-		startExpand: function() {				
-		  var self = this;
-			this.interval = window.setInterval(function() {self.checkExpand();}, 400);
-		},
-		
-		stopExpand: function() {
-			clearInterval(this.interval);	
-		},
-		
-		checkExpand: function() {
-			
-			if (this.dummy == null)
-			{
-				this.dummy = jQuery('<div></div>');
-				this.dummy.css({
-												'font-size'  : this.textarea.css('font-size'),
-												'font-family': this.textarea.css('font-family'),
-												'width'      : this.textarea.css('width'),
-												// IE shits its pants if you uncomment the next line!
-												//'padding'    : this.textarea.css('padding'),
-												'line-height': this.line_height + 'px',
-												'overflow-x' : 'hidden',
-												'position'   : 'absolute',
-												'top'        : 0,
-												'left'		 : -9999
-												}).appendTo('body');
-			}
-			
-			// Strip HTML tags
-			var html = this.textarea.val().replace(/(<|>)/g, '');
-			
-			// IE is different, as per usual
-			if ($.browser.msie)
-			{
-				html = html.replace(/\n/g, '<BR>new');
-			}
-			else
-			{
-				html = html.replace(/\n/g, '<br>new');
-			}
-			
-			if (this.dummy.html() != html)
-			{
-				this.dummy.html(html);	
-				
-				if (this.max_height > 0 && (this.dummy.height() + this.line_height > this.max_height))
-				{
-					this.textarea.css('overflow-y', 'auto');	
-				}
-				else
-				{
-					this.textarea.css('overflow-y', 'hidden');
-					if (this.textarea.height() < this.dummy.height() + this.line_height || (this.dummy.height() < this.textarea.height()))
-					{	
-						this.textarea.animate({height: (this.dummy.height() + this.line_height) + 'px'}, 100);	
-					}
-				}
-			}
-		}
-	 });
-})(jQuery);
diff --git a/solr/webapp/web/js/lib/jquery.blockUI.js b/solr/webapp/web/js/lib/jquery.blockUI.js
index 55b52a9..38e6410 100644
--- a/solr/webapp/web/js/lib/jquery.blockUI.js
+++ b/solr/webapp/web/js/lib/jquery.blockUI.js
@@ -1,4 +1,28 @@
-/*!
+/*
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+/*!
  * jQuery blockUI plugin
  * Version 2.39 (23-MAY-2011)
  * @requires jQuery v1.2.3 or later
diff --git a/solr/webapp/web/js/lib/jquery.cookie.js b/solr/webapp/web/js/lib/jquery.cookie.js
index 6d5974a..03079a5 100644
--- a/solr/webapp/web/js/lib/jquery.cookie.js
+++ b/solr/webapp/web/js/lib/jquery.cookie.js
@@ -1,3 +1,27 @@
+/*
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /*!
  * jQuery Cookie Plugin
  * https://github.com/carhartl/jquery-cookie
diff --git a/solr/webapp/web/js/lib/jquery.form.js b/solr/webapp/web/js/lib/jquery.form.js
index b36ceb9d..63ee4c1 100644
--- a/solr/webapp/web/js/lib/jquery.form.js
+++ b/solr/webapp/web/js/lib/jquery.form.js
@@ -1,3 +1,27 @@
+/*
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /*!
  * jQuery Form Plugin
  * version: 2.47 (04-SEP-2010)
diff --git a/solr/webapp/web/js/lib/jquery.jstree.js b/solr/webapp/web/js/lib/jquery.jstree.js
index 0eea2f6..b86db6e 100644
--- a/solr/webapp/web/js/lib/jquery.jstree.js
+++ b/solr/webapp/web/js/lib/jquery.jstree.js
@@ -1,4 +1,28 @@
 /*
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+/*
  * jsTree 1.0-rc1
  * http://jstree.com/
  *
@@ -3507,4 +3531,4 @@
 		}
 	});
 })(jQuery);
-//*/
\ No newline at end of file
+//*/
diff --git a/solr/webapp/web/js/lib/jquery.sammy.js b/solr/webapp/web/js/lib/jquery.sammy.js
index d655c8d..ffa3dcc 100644
--- a/solr/webapp/web/js/lib/jquery.sammy.js
+++ b/solr/webapp/web/js/lib/jquery.sammy.js
@@ -1,20 +1,27 @@
 // name: sammy
 // version: 0.6.2
 /*
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
 
-     http://www.apache.org/licenses/LICENSE-2.0
+Copyright (c) 2008 Aaron Quint, Quirkey NYC, LLC
 
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
 */
 
 (function($, window) {
diff --git a/solr/webapp/web/js/lib/jquery.timeago.js b/solr/webapp/web/js/lib/jquery.timeago.js
index 6df7742..32a5ac7 100644
--- a/solr/webapp/web/js/lib/jquery.timeago.js
+++ b/solr/webapp/web/js/lib/jquery.timeago.js
@@ -1,4 +1,28 @@
 /*
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+/*
  * timeago: a jQuery plugin, version: 0.9.3 (2011-01-21)
  * @requires jQuery v1.2.3 or later
  *
diff --git a/solr/webapp/web/js/lib/order.js b/solr/webapp/web/js/lib/order.js
index 5edd5ce..169ccfb 100644
--- a/solr/webapp/web/js/lib/order.js
+++ b/solr/webapp/web/js/lib/order.js
@@ -1,3 +1,30 @@
+/*
+
+MIT License
+-----------
+
+Copyright (c) 2010-2011, The Dojo Foundation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /**
  * @license RequireJS order 1.0.5 Copyright (c) 2010-2011, The Dojo Foundation All Rights Reserved.
  * Available via the MIT or new BSD license.
diff --git a/solr/webapp/web/js/main.js b/solr/webapp/web/js/main.js
index 635f15c..792a185 100644
--- a/solr/webapp/web/js/main.js
+++ b/solr/webapp/web/js/main.js
@@ -20,12 +20,12 @@
   [
     'lib/order!lib/console',
     'lib/order!jquery',
-    'lib/order!lib/jquery.autogrow',
     'lib/order!lib/jquery.cookie',
     'lib/order!lib/jquery.form',
     'lib/order!lib/jquery.jstree',
     'lib/order!lib/jquery.sammy',
     'lib/order!lib/jquery.timeago',
+    'lib/order!lib/jquery.ajaxfileupload',
     'lib/order!lib/jquery.blockUI',
     'lib/order!lib/highlight',
     'lib/order!lib/linker',
@@ -37,6 +37,7 @@
     'lib/order!scripts/analysis',
     'lib/order!scripts/cloud',
     'lib/order!scripts/cores',
+    'lib/order!scripts/documents',
     'lib/order!scripts/dataimport',
     'lib/order!scripts/dashboard',
     'lib/order!scripts/file',
@@ -54,4 +55,4 @@
   {
     app.run();
   }
-);
\ No newline at end of file
+);
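
The lib/order! prefix above is RequireJS's order plugin: the listed modules still download asynchronously, but they execute in list order, which matters here because every jQuery plugin in the list must run after jquery itself. A minimal sketch of that guarantee (module names are illustrative):

// Hedged sketch, assuming the order plugin shipped in js/lib/order.js:
require( [ 'lib/order!jquery', 'lib/order!lib/jquery.cookie' ],
  function()
  {
    // jquery.cookie is guaranteed to have executed after jquery,
    // even if its download finished first.
  }
);
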
diff --git a/solr/webapp/web/js/require.js b/solr/webapp/web/js/require.js
index 1846b2e..8ee0051 100644
--- a/solr/webapp/web/js/require.js
+++ b/solr/webapp/web/js/require.js
@@ -1,3 +1,30 @@
+/*
+
+MIT License
+-----------
+
+Copyright (c) 2010-2011, The Dojo Foundation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
 /** vim: et:ts=4:sw=4:sts=4
  * @license RequireJS 1.0.6 Copyright (c) 2010-2012, The Dojo Foundation All Rights Reserved.
  * Available via the MIT or new BSD license.
diff --git a/solr/webapp/web/js/scripts/analysis.js b/solr/webapp/web/js/scripts/analysis.js
index 9186439..8aee129 100644
--- a/solr/webapp/web/js/scripts/analysis.js
+++ b/solr/webapp/web/js/scripts/analysis.js
@@ -125,11 +125,11 @@
               var fields = 0;
               for( var key in context.params )
               {
-                if( 'string' === typeof context.params[key] )
+                if( 'string' === typeof context.params[key] && 0 !== context.params[key].length )
                 {
                   fields++;
                   $( '[name="' + key + '"]', analysis_form )
-                    .val( context.params[key].replace( /\+/g, ' ' ) );
+                    .val( context.params[key] );
                 }
               }
 
@@ -250,7 +250,8 @@
               var params = $.param( compute_analysis_params() )
                             .replace( /[\w\.]+=\+*(&)/g, '$1' ) // remove empty parameters
                             .replace( /(&)+/, '$1' )            // reduce multiple ampersands
-                            .replace( /^&/, '' );               // remove leading ampersand
+                            .replace( /^&/, '' )                // remove leading ampersand
+                            .replace( /\+/g, '%20' );           // encode plus signs as %20 spaces
 
               context.redirect( context.path.split( '?' ).shift() + '?' + params );
               return false;
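
A minimal sketch (assuming jQuery's form-urlencoding behavior) of the issue the analysis.js change above works around: $.param() encodes spaces as '+', but decodeURIComponent() leaves '+' untouched, so values written with '+' came back corrupted unless every reader re-decoded them by hand; emitting '%20' instead lets the values round-trip.

// Hedged sketch, not part of the patch:
var params = jQuery.param( { q : 'foo bar' } );  // "q=foo+bar"
decodeURIComponent( 'foo+bar' );                 // "foo+bar" -- '+' survives decoding
decodeURIComponent( 'foo%20bar' );               // "foo bar" -- round-trips cleanly
params = params.replace( /\+/g, '%20' );         // "q=foo%20bar"
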
diff --git a/solr/webapp/web/js/scripts/app.js b/solr/webapp/web/js/scripts/app.js
index e7d5157..8eea97f 100644
--- a/solr/webapp/web/js/scripts/app.js
+++ b/solr/webapp/web/js/scripts/app.js
@@ -351,23 +351,25 @@
               {
                 var core_name = $( 'option:selected', this ).text();
 
-                if( core_name )
-                {
-                  that.core_menu
+                that.core_menu
                     .html
-                    (
-                      '<li class="overview"><a href="#/' + core_name + '"><span>Overview</span></a></li>' + "\n" +
-                      '<li class="ping"><a rel="' + that.config.solr_path + '/' + core_name + '/admin/ping"><span>Ping</span></a></li>' + "\n" +
-                      '<li class="query"><a href="#/' + core_name + '/query"><span>Query</span></a></li>' + "\n" +
-                      '<li class="schema"><a href="#/' + core_name + '/schema"><span>Schema</span></a></li>' + "\n" +
-                      '<li class="config"><a href="#/' + core_name + '/config"><span>Config</span></a></li>' + "\n" +
-                      '<li class="replication"><a href="#/' + core_name + '/replication"><span>Replication</span></a></li>' + "\n" +
-                      '<li class="analysis"><a href="#/' + core_name + '/analysis"><span>Analysis</span></a></li>' + "\n" +
-                      '<li class="schema-browser"><a href="#/' + core_name + '/schema-browser"><span>Schema Browser</span></a></li>' + "\n" + 
-                      '<li class="plugins"><a href="#/' + core_name + '/plugins"><span>Plugins / Stats</span></a></li>' + "\n" +
-                      '<li class="dataimport"><a href="#/' + core_name + '/dataimport"><span>Dataimport</span></a></li>' + "\n"
+                    ( // keep this list in alphabetical order after the Overview entry
+                        '<li class="overview"><a href="#/' + core_name + '"><span>Overview</span></a></li>' + "\n" +
+                            '<li class="analysis"><a href="#/' + core_name + '/analysis"><span>Analysis</span></a></li>' + "\n" +
+                            '<li class="config"><a href="#/' + core_name + '/config"><span>Config</span></a></li>' + "\n" +
+                            '<li class="dataimport"><a href="#/' + core_name + '/dataimport"><span>Dataimport</span></a></li>' + "\n" +
+                            '<li class="documents"><a href="#/' + core_name + '/documents"><span>Documents</span></a></li>' + "\n" +
+                            '<li class="ping"><a rel="' + that.config.solr_path + '/' + core_name + '/admin/ping"><span>Ping</span></a></li>' + "\n" +
+                            '<li class="plugins"><a href="#/' + core_name + '/plugins"><span>Plugins / Stats</span></a></li>' + "\n" +
+                            '<li class="query"><a href="#/' + core_name + '/query"><span>Query</span></a></li>' + "\n" +
+                            '<li class="replication"><a href="#/' + core_name + '/replication"><span>Replication</span></a></li>' + "\n" +
+                            '<li class="schema"><a href="#/' + core_name + '/schema"><span>Schema</span></a></li>' + "\n" +
+                            '<li class="schema-browser"><a href="#/' + core_name + '/schema-browser"><span>Schema Browser</span></a></li>' + "\n"
+
                     )
                     .show();
+                if( core_name )
+                {
                 }
                 else
                 {
@@ -585,5 +587,74 @@
 
 };
 
+var connection_check_delay = 1000;
+var connection_working = true;
+
+var connection_check = function connection_check()
+{
+  $.ajax
+  (
+    {
+      url : config.solr_path + config.core_admin_path + '?wt=json&indexInfo=false',
+      dataType : 'json',
+      context : $( '.blockUI #connection_status span' ),
+      beforeSend : function( arr, form, options )
+      {               
+        this
+          .addClass( 'loader' );
+      },
+      success : function( response )
+      {
+        connection_working = true;
+
+        this
+          .html( 'Instance is available - <a href="javascript:location.reload();">Reload the page</a>' );
+
+        this.parents( '#connection_status' )
+          .addClass( 'online' );
+
+        this.parents( '.blockUI' )
+          .css( 'borderColor', '#080' );
+      },
+      error : function()
+      {
+        connection_check_delay += connection_check_delay;
+        window.setTimeout( connection_check, connection_check_delay );
+      },
+      complete : function()
+      {
+        this
+          .removeClass( 'loader' );
+      }
+    }
+  );
+};
+
+var connection_error = function connection_error()
+{
+  connection_working = false;
+
+  $.blockUI
+  (
+    {
+      message: $( '#connection_status' ),
+      css: { width: '450px', borderColor: '#f00' }
+    }
+  );
+
+  window.setTimeout( connection_check, connection_check_delay );
+};
+
+$( document ).ajaxError
+(
+  function( event, xhr, settings, thrownError )
+  {
+    if( connection_working && 0 === xhr.status )
+    {
+      connection_error();
+    }
+  }
+);
+
 $.ajaxSetup( { cache: false } );
 var app = new solr_admin( app_config );
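
A condensed sketch of the reconnect pattern the connection_check/connection_error pair above implements: each failed poll doubles the retry delay, and a successful poll restores the UI. The URL and the reload-on-success behavior below are illustrative assumptions, not the patch's exact code.

// Hedged sketch of the exponential-backoff poll:
var retry_delay = 1000;
function poll_until_available()
{
  jQuery.ajax(
    {
      url : '/solr/admin/cores?wt=json&indexInfo=false',  // illustrative path
      dataType : 'json',
      success : function() { window.location.reload(); }, // illustrative recovery
      error : function()
      {
        retry_delay += retry_delay;  // 1s, 2s, 4s, ...
        window.setTimeout( poll_until_available, retry_delay );
      }
    }
  );
}
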
diff --git a/solr/webapp/web/js/scripts/dataimport.js b/solr/webapp/web/js/scripts/dataimport.js
index 1b2b7f1..28beb0a 100644
--- a/solr/webapp/web/js/scripts/dataimport.js
+++ b/solr/webapp/web/js/scripts/dataimport.js
@@ -323,9 +323,6 @@
                 submit_span
                   .data( 'original', submit_span.text() )
                   .text( submit_span.data( 'debugmode' ) );
-
-                $( 'textarea', block )
-                  .autogrow()
               }
               else
               {
@@ -812,4 +809,4 @@
       }
     );
   }
-);
\ No newline at end of file
+);
diff --git a/solr/webapp/web/js/scripts/documents.js b/solr/webapp/web/js/scripts/documents.js
new file mode 100644
index 0000000..9d12e23
--- /dev/null
+++ b/solr/webapp/web/js/scripts/documents.js
@@ -0,0 +1,370 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+// Helper for formatting JSON and other content types
+var content_generator = {
+
+  _default: function (toEsc) {
+    return toEsc.esc();
+  },
+
+  json: function (toEsc) {
+    return app.format_json(toEsc);
+  }
+
+};
+
+// Utility function for toggling the various display elements on and off
+function toggles(documents_form, show_json, show_file, show_doc, doc_text, show_wizard) {
+  var json_only = $('#json-only');
+  var the_document = $('#document', documents_form);
+  if (show_doc) {
+    //console.log("doc: " + doc_text);
+    the_document.val(doc_text);
+    the_document.show();
+  } else {
+    the_document.hide();
+  }
+  if (show_json) {
+    json_only.show();
+  } else {
+    json_only.hide();
+  }
+  var file_upload = $('#file-upload', documents_form);
+  var upload_only = $('#upload-only', documents_form);
+  if (show_file) {
+    file_upload.show();
+    upload_only.show();
+  } else {
+    file_upload.hide();
+    upload_only.hide();
+  }
+  var wizard = $('#wizard', documents_form);
+  if (show_wizard) {
+    wizard.show();
+  } else {
+    wizard.hide();
+  }
+}
+// #/:core/documents
+
+// Utility function for setting up the wizard fields
+function addWizardFields(active_core, wizard) {
+  var core_basepath = active_core.attr('data-basepath');
+  var select_options = "";
+  //Populate the select options based off the Fields REST API
+  $.getJSON(window.location.protocol + '//' + window.location.host
+          + core_basepath + "/schema/fields").done(
+      //TODO: handle dynamic fields
+      //TODO: get the unique key, too
+      function (data) {
+        var field_select = $("#wiz-field-select", wizard);
+        field_select.empty();
+        $.each(data.fields,
+            function (i, item) {
+              //console.log("i[" + i + "]=" + item.name);
+              if (item.name != "_version_"){
+                select_options += '<option name="' + item.name + '">'
+                  + item.name + '</option>';
+              }
+            });
+        //console.log("select_options: " + select_options);
+        //fill in the select options
+        field_select.append(select_options);
+      });
+  var wizard_doc = $("#wizard-doc", wizard);
+  wizard_doc.die('focusin')
+      .live('focusin', function (event) {
+        $("#wizard-doc", wizard).text("");
+      }
+  );
+  //Add the click handler for the "Add Field" target, which
+  //takes the field content and moves it into the document target
+  var add_field = $("#add-field-href", wizard);
+  add_field.die("click")
+      .live("click",
+      function (event) {
+        //take the field and the contents and append it to the document
+        var wiz_select = $("#wiz-field-select", wizard);
+        var selected = $("option:selected", wiz_select);
+        console.log("selected field: " + selected);
+        var wiz_doc = $("#wizard-doc", wizard);
+        var the_document = $("#document");
+        var current_doc = the_document.val();
+        console.log("current_text: " + current_doc + " wiz_doc: " + wiz_doc.val());
+        var index = current_doc.lastIndexOf("}");
+        var new_entry = '"' + selected.val() + '":"' + wiz_doc.val() + '"';
+        if (index >= 0) {
+          current_doc = current_doc.substring(0, index) + ', ' + new_entry + "}";
+        } else {
+          //we don't have a doc at all
+          current_doc = "{" + new_entry + "}";
+        }
+        current_doc = content_generator['json'](current_doc);
+        the_document.val(current_doc);
+        //clear the wiz doc window
+        wiz_doc.val("");
+        return false;
+      }
+  );
+
+  //console.log("adding " + i + " child: " + child);
+
+}
+
+//The main program for adding the docs
+sammy.get
+(
+    new RegExp(app.core_regex_base + '\\/(documents)$'),
+    function (context) {
+      var active_core = this.active_core;
+      var core_basepath = active_core.attr('data-basepath');
+      var content_element = $('#content');
+
+
+      $.post
+      (
+          'tpl/documents.html',
+          function (template) {
+
+            content_element
+                .html(template);
+            var documents_element = $('#documents', content_element);
+            var documents_form = $('#form form', documents_element);
+            var url_element = $('#url', documents_element);
+            var result_element = $('#result', documents_element);
+            var response_element = $('#response', documents_element);
+            var doc_type_select = $('#document-type', documents_form);
+            // Since we pre-fill the area with example docs, clear the example
+            // content when the area receives the focus.
+            $('#document', documents_form).die('focusin')
+                .live('focusin',
+                function (event) {
+                  var document_type = $('#document-type', documents_form).val();
+                  if (document_type != "wizard"){
+                    //Don't clear the document when in wizard mode.
+                    var the_document = $('#document', documents_form);
+                    the_document.text("");
+                  }
+                }
+            );
+
+            /*response_element.html("");*/
+            //Setup the handlers for toggling the various display options for the "Document Type" select
+            doc_type_select
+                .die('change')
+                .live
+            (
+                'change',
+                function (event) {
+                  var document_type = $('#document-type', documents_form).val();
+                  var file_upload = $('#file-upload', documents_form);
+
+                  //need to clear out any old file upload by forcing a redraw so that
+                  //we don't try to upload an old file
+                  file_upload.html(file_upload.html());
+                  if (document_type == "json") {
+                    toggles(documents_form, true, false, true, '{"id":"change.me","title":"change.me"}', false);
+                    $("#attribs").show();
+                  } else if (document_type == "upload") {
+                    toggles(documents_form, false, true, false, "", false);
+                    $("#attribs").show();
+                  } else if (document_type == "csv") {
+                    toggles(documents_form, false, false, true, "id,title\nchange.me,change.me", false);
+                    $("#attribs").show();
+                  } else if (document_type == "solr") {
+                    toggles(documents_form, false, false, true, '<add>\n' +
+                        '<doc>\n' +
+                        '<field name="id">change.me</field>\n' +
+                        '<field name="title" >chang.me</field>\n' +
+                        '</doc>\n' +
+                        '</add>\n', false);
+                    $("#attribs").hide();
+                  } else if (document_type == "wizard") {
+                    var wizard = $('#wizard', documents_form);
+                    addWizardFields(active_core, wizard);
+                    //$("#wizard-doc", wizard).text('Enter your field text here and then click "Add Field" to add the field to the document.');
+                    toggles(documents_form, false, false, true, "", true);
+                    $("#attribs").show();
+                  } else if (document_type == "xml") {
+                    toggles(documents_form, false, false, true, '<doc>\n' +
+                        '<field name="id">change.me</field>' +
+                        '<field name="title">change.me</field>' +
+                        '</doc>', false);
+                    $("#attribs").show();
+                  }
+                  return false;
+                }
+            );
+            doc_type_select.chosen().trigger('change');
+            //Setup the submit option handling.
+            documents_form
+                .die('submit')
+                .live
+            (
+                'submit',
+                function (event) {
+                  var form_values = [];
+                  var handler_path = $('#qt', documents_form).val();
+                  if ('/' !== handler_path[0]) {
+                    form_values.push({ name: 'qt', value: handler_path.esc() });
+                    handler_path = '/update';
+                  }
+
+                  var document_url = window.location.protocol + '//' + window.location.host
+                      + core_basepath + handler_path + '?wt=json';
+
+                  url_element
+                      .attr('href', document_url)
+                      .text(document_url)
+                      .trigger('change');
+                  var the_document = $('#document', documents_form).val();
+                  var commit_within = $('#commitWithin', documents_form).val();
+                  var boost = $('#boost', documents_form).val();
+                  var overwrite = $('#overwrite', documents_form).val();
+                  var the_command = "";
+                  var content_type = "";
+                  var document_type = $('#document-type', documents_form).val();
+                  var doingFileUpload = false;
+                  //Both JSON and Wizard use the same pathway for submission
+                  //New entries primarily need to fill the_command and set the content_type
+                  if (document_type == "json" || document_type == "wizard") {
+                    //create a JSON command
+                    the_command = "{"
+                        + '"add":{ "doc":' + the_document + ","
+                        + '"boost":' + boost + ","
+                        + '"overwrite":' + overwrite + ","
+                        + '"commitWithin":' + commit_within
+                        + "}}";
+                    content_type = "application/json";
+                  } else if (document_type == "csv") {
+                    the_command = the_document;
+                    document_url += "&commitWithin=" + commit_within + "&overwrite=" + overwrite;
+                    content_type = "application/csv";
+                  } else if (document_type == "xml") {
+                    the_command = '<add commitWithin="' + commit_within
+                        + '" overwrite="' + overwrite + '"'
+                        + ">"
+                        + the_document + "</add>";
+                    content_type = "text/xml";
+                  } else if (document_type == "upload") {
+                    doingFileUpload = true;
+                  } else if (document_type == "solr") {
+                    //guess content_type
+                    the_command = the_document;
+                    if (the_document.indexOf("<") >= 0) {
+                      //XML
+                      content_type = "text/xml";
+                    } else if (the_document.indexOf("{") >= 0) {
+                      //JSON
+                      content_type = "application/json";
+                    } //TODO: do we need to handle others?
+                  } else {
+                    //How to handle other?
+                  }
+
+                  //Handle the submission of the form in the case where we are not uploading a file
+                  if (doingFileUpload == false) {
+                    $.ajax(
+                        {
+                          url: document_url,
+                          //dataType : 'json',
+                          processData: false,
+                          type: 'POST',
+                          contentType: content_type,
+                          data: the_command,
+                          context: response_element,
+                          beforeSend: function (xhr, settings) {
+                            console.log("beforeSend: Vals: " + document_url + " content-type: " + document_type + " the cmd: " + the_command);
+
+                          },
+                          success: function (response, text_status, xhr) {
+                            console.log("success:  " + response + " status: " + text_status + " xhr: " + xhr.responseText);
+                            this.html('<div><span class="description">Status</span>: ' + text_status + '</div>'
+                                + '<div><span class="description">Response:</span>' + '<pre class="syntax language-json"><code>' + content_generator['json'](xhr.responseText) + "</code></pre></div>");
+                            result_element.show();
+                          },
+                          error: function (xhr, text_status, error_thrown) {
+                            console.log("error: " + text_status + " thrown: " + error_thrown);
+                            this.html('<div><span class="description">Status</span>: ' + text_status + '</div><div><span class="description">Error:</span> '
+                                + '' + error_thrown
+                                + '</div>'
+                                + '<div><span class="description">Error</span>:' + '<pre class="syntax language-json"><code>'
+                                + content_generator['json'](xhr.responseText) +
+                                '</code></pre></div>');
+                            result_element.show();
+                          },
+                          complete: function (xhr, text_status) {
+                            //console.log("complete: " + text_status + " xhr: " + xhr.responseText + " doc type: " + document_type);
+
+                            //alert(text_status + ": " + xhr.responseText);
+                            /*this
+                             .removeClass( 'loader' );*/
+                          }
+                        }
+                    );
+                  } else {
+                    //upload the file
+                    var the_file = $('#the-file', documents_form);
+                    var erh_params = $('#erh-params', documents_form).val();
+                    if (erh_params != "") {
+                      if (erh_params.substring(0,1) != "&"){
+                        erh_params = "&" + erh_params;
+                      }
+                      document_url = document_url + erh_params;
+                    }
+                    console.log("uploading file to: " + document_url);
+                    the_file.ajaxfileupload({
+                      'action': document_url,
+                      'validate_extensions': false,
+                      'upload_now': true,
+                      'params': {
+                        'extra': 'info'
+                      },
+                      'onComplete': function (response) {
+                        response = response.replace('<pre style="word-wrap: break-word; white-space: pre-wrap;">', "");
+                        response = response.replace("</pre>", "");
+                        console.log('completed upload: ' + response);
+                        response_element.html('<div><span class="description">Response:</span>' + '<pre class="syntax language-json"><code>' + content_generator['json'](response) + "</code></pre></div>");
+                        result_element.show();
+
+                      },
+                      'onStart': function () {
+                        console.log("starting file upload");
+                        //if (weWantedTo) return false; // cancels upload
+                      },
+                      'onCancel': function () {
+                        console.log('no file selected');
+                      }
+                    });
+                  }
+                  return false;
+                }
+            );
+          }
+      );
+    }
+);
+/*
+ Sample docs:
+ <doc boost="2.5">
+ <field name="id">05991</field>
+ <field name="title" boost="2.0">Bridgewater</field>
+ </doc>
+
+ {"id":"foo","title":"blah"}
+
+ */
\ No newline at end of file
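
A minimal sketch of the JSON update envelope documents.js assembles for the /update handler; the core path and field values are illustrative assumptions.

// Hedged sketch of the "add" command the form submits:
var doc = { id : 'change.me', title : 'change.me' };
var command = JSON.stringify(
  { add : { doc : doc, boost : 1.0, overwrite : true, commitWithin : 1000 } }
);
jQuery.ajax(
  {
    url : '/solr/collection1/update?wt=json',  // illustrative core path
    type : 'POST',
    contentType : 'application/json',
    processData : false,  // send the raw JSON body as-is
    data : command
  }
);
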
diff --git a/solr/webapp/web/js/scripts/file.js b/solr/webapp/web/js/scripts/file.js
index a1ff877..95d201f 100644
--- a/solr/webapp/web/js/scripts/file.js
+++ b/solr/webapp/web/js/scripts/file.js
@@ -22,34 +22,51 @@
   function( context )
   {
     var core_basepath = this.active_core.attr( 'data-basepath' );
-	var filetype = context.params.splat[1]; // either schema or config	
-	var filename = this.active_core.attr( filetype );
+    var content_element = $( '#content' );
 
-    $.ajax
+    var url = window.location.protocol + '//' + window.location.host + core_basepath + '/admin/file'
+            + '?file=' + this.active_core.attr( context.params.splat[1] )
+            + '&contentType=text/xml;charset=utf-8';
+
+    $.get
     (
+      'tpl/file.html',
+      function( template )
       {
-        url : core_basepath + "/admin/file?file=" + filename + "&contentType=text/xml;charset=utf-8",
-        dataType : 'xml',
-        context : $( '#content' ),
-        beforeSend : function( xhr, settings )
-        {
-          this
-          .html( '<div class="loader">Loading ...</div>' );
-        },
-        complete : function( xhr, text_status )
-        {
-          var code = $(
-            '<pre class="syntax language-xml"><code>' +
-            xhr.responseText.esc() +
-            '</code></pre>'
-          );
-          this.html( code );
+        content_element
+          .html( template );
 
-          if( 'success' === text_status )
+        $( '#url', content_element )
+          .text( url )
+          .attr( 'href', url );
+
+        $.ajax
+        (
           {
-            hljs.highlightBlock( code.get(0) );
+            url : url,
+            dataType : 'xml',
+            context : $( '#response', content_element ),
+            beforeSend : function( xhr, settings )
+            {
+              this
+              .html( '<div class="loader">Loading ...</div>' );
+            },
+            complete : function( xhr, text_status )
+            {
+              var code = $(
+                '<pre class="syntax language-xml"><code>' +
+                xhr.responseText.esc() +
+                '</code></pre>'
+              );
+              this.html( code );
+
+              if( 'success' === text_status )
+              {
+                hljs.highlightBlock( code.get(0) );
+              }
+            }
           }
-        }
+        );
       }
     );
   }
diff --git a/solr/webapp/web/js/scripts/query.js b/solr/webapp/web/js/scripts/query.js
index 3d7bcba..417fef9 100644
--- a/solr/webapp/web/js/scripts/query.js
+++ b/solr/webapp/web/js/scripts/query.js
@@ -106,6 +106,39 @@
             }
           );
 
+        $( '.multiple a', query_form )
+          .die( 'click' )
+          .live
+          (
+            'click',
+            function( event )
+            {
+              var self = $( this );
+              var row = self.closest( '.row' );
+              var container = self.closest( '.multiple' );
+              
+              var add = parseInt( self.data( 'action' ), 10 );
+              if( add )
+              {
+                var new_row = row.clone();
+                new_row.find( 'input' ).val( '' );
+                row.after( new_row );
+                row.next().find( 'input' ).focus();
+              }
+              else if( 1 === $( '.row', container ).size() )
+              {
+                row.find( 'input' ).val( '' ).focus();
+              }
+              else
+              {
+                row.remove();
+                container.find( 'input:last' ).focus();
+              }
+
+              return false;
+            }
+          );
+
         query_form
           .die( 'submit' )
           .live
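
The click handler added above drives repeatable filter-query rows: each row carries two anchors whose numeric data-action decides the branch (1 clones the row, 0 removes it while always keeping one empty row). The matching markup is added to tpl/query.html further down; a minimal sketch of the dispatch:

// Hedged sketch of the data-action convention:
var action = parseInt( jQuery( this ).data( 'action' ), 10 );
if( action )  // data-action="1": clone this row and focus the copy
{
  // row.clone() ...
}
else          // data-action="0": remove this row, or clear it if it is the last
{
  // row.remove() ...
}
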
diff --git a/solr/webapp/web/tpl/dataimport.html b/solr/webapp/web/tpl/dataimport.html
index 17f3e1d..03b2d3f 100644
--- a/solr/webapp/web/tpl/dataimport.html
+++ b/solr/webapp/web/tpl/dataimport.html
@@ -133,12 +133,12 @@
       </label>
 
       <label for="clean" class="checkbox">
-        <input type="checkbox" name="clean" id="clean" value="true">
+        <input type="checkbox" name="clean" id="clean" value="true" checked="checked">
         <a rel="help">Clean</a>
       </label>
 
       <label for="commit" class="checkbox">
-        <input type="checkbox" name="commit" id="commit" value="true">
+        <input type="checkbox" name="commit" id="commit" value="true" checked="checked">
         <a rel="help">Commit</a>
       </label>
 
diff --git a/solr/webapp/web/tpl/documents.html b/solr/webapp/web/tpl/documents.html
new file mode 100644
index 0000000..bd953a4
--- /dev/null
+++ b/solr/webapp/web/tpl/documents.html
@@ -0,0 +1,107 @@
+<!--
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+-->
+
+<div id="documents" class="clearfix">
+  <div id="form">
+    <form action="#" method="post">
+      <label for="qt">
+        <a rel="help">Request-Handler (qt)</a>
+      </label>
+      <input type="text" id="qt" value="/update" title="Request handler in solrconfig.xml.">
+      <!-- TODO: Add support for uploading docs and a doc wizard -->
+      <label for="document-type">
+        <a rel="help">Document Type</a>
+      </label>
+
+      <div><select name="document-type" id="document-type" title="The type of the document field">
+        <!-- TODO: support the Builder -->
+        <option value="csv">CSV</option>
+        <option value="wizard">Document Builder</option>
+        <option value="upload">File Upload</option>
+        <option selected="true" value="json">JSON</option>
+        <option value="solr">Solr Command (raw XML or JSON)</option>
+        <option value="xml">XML</option>
+        <!-- TODO: If other, then, show a text box -->
+        <!--<option>Other</option>-->
+      </select></div>
+
+      <div id="document-container">
+        <div id="wizard">
+          <div id="wizard-fields">
+            <div><span class="description">Field</span>: <select id="wiz-field-select" name="wiz-field-select"></select>
+            </div>
+            <div><span id="wiz-field-data"><span class="description">Field Data</span>:</span> <textarea id="wizard-doc"
+                                                                                                         name="wizard-doc"
+                                                                                                         rows="10"
+                                                                                                         cols="40">Enter your field text here and then click "Add Field" to add the field to the document.</textarea></div>
+          </div>
+          <div id="wizard-add"><a id="add-field-href" href="#"><img border="0" src="./img/ico/plus-button.png"/>Add
+            Field</a></div>
+        </div>
+        <label for="document">
+          <a rel="help">Document(s)</a>
+        </label>
+        <textarea name="document" id="document" title="The Document" rows="10"
+                  cols="70">{"id":"change.me","title":"change.me"}</textarea>
+
+        <div id="file-upload">
+          <input type="file" id="the-file" name="the-file"/>
+        </div>
+      </div>
+
+      <div id="advanced">
+        <!-- TODO: only show for JSON/XML-->
+        <div id="attribs">
+          <div id="upload-only">
+            <label for="erh-params"><!-- TODO: cleaner way to do this? -->
+              <a rel="help">Extracting Req. Handler Params</a>
+            </label>
+            <input type="text" id="erh-params" value="&literal.id=change.me"
+                   title="Extracting Request Handler Parameters" size="50">
+          </div>
+          <div id="general-attribs">
+            <label for="commitWithin">
+              <a rel="help">Commit Within</a>
+            </label>
+            <input type="text" id="commitWithin" value="1000" title="Commit Within (ms)">
+            <label for="overwrite">
+              <a rel="help">Overwrite</a>
+            </label>
+            <input type="text" id="overwrite" value="true" title="Overwrite">
+          </div>
+          <!-- Boost is json only, since the XML has it embedded -->
+          <div id="json-only">
+            <label for="boost">
+              <a rel="help">Boost</a>
+            </label>
+            <input type="text" id="boost" value="1.0" title="Document Boost">
+          </div>
+        </div>
+      </div>
+
+      <button type="submit" id="submit">Submit Document</button>
+    </form>
+  </div>
+  <div id="result">
+    <div id="response">
+      <!--<iframe src="about:blank"></iframe>-->
+    </div>
+
+  </div>
+</div>
\ No newline at end of file
diff --git a/solr/webapp/web/tpl/file.html b/solr/webapp/web/tpl/file.html
new file mode 100755
index 0000000..25fa03d
--- /dev/null
+++ b/solr/webapp/web/tpl/file.html
@@ -0,0 +1,23 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<div id="file" class="clearfix">
+
+  <a id="url" class="address-bar" href="#"></a>
+
+  <div id="response"></div>
+
+</div>
\ No newline at end of file
diff --git a/solr/webapp/web/tpl/query.html b/solr/webapp/web/tpl/query.html
index 1bcce1f..bd0cdb0 100644
--- a/solr/webapp/web/tpl/query.html
+++ b/solr/webapp/web/tpl/query.html
@@ -37,12 +37,20 @@
         <label for="fq">
           <a rel="help">fq</a>
         </label>
-        <input type="text" id="fq" name="fq" class="multiple" title="Filter query.">
+        <div class="multiple">
+          <div class="row clearfix">
+            <input type="text" id="fq" name="fq" title="Filter query.">
+            <div class="buttons">
+              <a class="rem" data-action="0"><span>[-]</span></a>
+              <a class="add" data-action="1"><span>[+]</span></a>
+            </div>
+          </div>
+        </div>
 
         <label for="sort">
           <a rel="help">sort</a>
         </label>
-        <input type="text" id="sort" name="sort" class="multiple" title="Sort field or function with asc|desc.">
+        <input type="text" id="sort" name="sort" title="Sort field or function with asc|desc.">
 
         <label for="start">
           <a rel="help">start</a>,
@@ -72,8 +80,8 @@
           <a rel="help">wt</a>
         </label>
         <select name="wt" id="wt" title="The writer type (response format).">
-          <option>xml</option>
           <option>json</option>
+          <option>xml</option>
           <option>python</option>
           <option>ruby</option>
           <option>php</option>
@@ -340,7 +348,7 @@
 
   <div id="result">
 
-    <a id="url" href="#"></a>
+    <a id="url" class="address-bar" href="#"></a>
 
     <div id="response">