update: bring the lucene4258 branch up to date with recent trunk changes

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4258@1481840 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/build.xml b/build.xml
index 8b65cc1..5b1a950 100644
--- a/build.xml
+++ b/build.xml
@@ -281,7 +281,6 @@
   <target name="nightly-smoke" description="Builds an unsigned release and smoke tests it" depends="clean,-env-JAVA7_HOME">
    <fail unless="JAVA7_HOME">JAVA7_HOME property or environment variable is not defined.</fail>
    <property name="svnversion.exe" value="svnversion" />
-   <exec dir="." executable="${svnversion.exe}" outputproperty="fakeReleaseSvnRevision" failifexecutionfails="false"/>
    <subant target="prepare-release-no-sign" inheritall="false" failonerror="true">
      <fileset dir="lucene" includes="build.xml" />
      <fileset dir="solr" includes="build.xml" />
@@ -303,7 +302,7 @@
      <arg value="-B"/>
      <arg file="dev-tools/scripts/smokeTestRelease.py"/>
      <arg value="${fakeRelease.uri}"/>
-     <arg value="${fakeReleaseSvnRevision}"/>
+     <arg value="skip"/>
      <arg value="${fakeReleaseVersion}"/>
      <arg file="${fakeReleaseTmp}"/>
      <arg value="false"/>
diff --git a/dev-tools/idea/.idea/libraries/JUnit.xml b/dev-tools/idea/.idea/libraries/JUnit.xml
index 4b44c5a..184f0d5 100644
--- a/dev-tools/idea/.idea/libraries/JUnit.xml
+++ b/dev-tools/idea/.idea/libraries/JUnit.xml
@@ -2,7 +2,7 @@
   <library name="JUnit">
     <CLASSES>
       <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/junit-4.10.jar!/" />
-      <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.0.9.jar!/" />
+      <root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.0.10.jar!/" />
     </CLASSES>
     <JAVADOC />
     <SOURCES />
diff --git a/dev-tools/maven/lucene/replicator/pom.xml.template b/dev-tools/maven/lucene/replicator/pom.xml.template
new file mode 100644
index 0000000..e538235
--- /dev/null
+++ b/dev-tools/maven/lucene/replicator/pom.xml.template
@@ -0,0 +1,75 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+  -->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.lucene</groupId>
+    <artifactId>lucene-parent</artifactId>
+    <version>@version@</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+  <groupId>org.apache.lucene</groupId>
+  <artifactId>lucene-replicator</artifactId>
+  <packaging>jar</packaging>
+  <name>Lucene Replicator</name>
+  <description>Lucene Replicator Module</description>
+  <properties>
+    <module-directory>lucene/replicator</module-directory>
+    <relative-top-level>../../..</relative-top-level>
+    <module-path>${relative-top-level}/${module-directory}</module-path>
+  </properties>
+  <scm>
+    <connection>scm:svn:${vc-anonymous-base-url}/${module-directory}</connection>
+    <developerConnection>scm:svn:${vc-dev-base-url}/${module-directory}</developerConnection>
+    <url>${vc-browse-base-url}/${module-directory}</url>
+  </scm>
+  <dependencies>
+    <dependency> 
+      <!-- lucene-test-framework dependency must be declared before lucene-core -->
+      <groupId>${project.groupId}</groupId>
+      <artifactId>lucene-test-framework</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>lucene-facet</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+  <build>
+    <sourceDirectory>${module-path}/src/java</sourceDirectory>
+    <testSourceDirectory>${module-path}/src/test</testSourceDirectory>
+    <testResources>
+      <testResource>
+        <directory>${project.build.testSourceDirectory}</directory>
+        <excludes>
+          <exclude>**/*.java</exclude>
+        </excludes>
+      </testResource>
+    </testResources>
+  </build>
+</project>
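
Once expanded, this template publishes the new module as org.apache.lucene:lucene-replicator. A minimal sketch of how the module is meant to be driven on the publishing side, per LUCENE-4975 (the class names and the SnapshotDeletionPolicy requirement are assumptions about the new API, not something this patch shows):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.SnapshotDeletionPolicy;
    import org.apache.lucene.replicator.IndexRevision;
    import org.apache.lucene.replicator.LocalReplicator;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
    // the replicator hands out commit points, so they must be protected from deletion
    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
    IndexWriter writer = new IndexWriter(new RAMDirectory(), conf);
    LocalReplicator replicator = new LocalReplicator();
    writer.commit();                               // a revision always points at a commit
    replicator.publish(new IndexRevision(writer)); // clients can now replicate this revision
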
diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template
index efc91c6..57ac6da 100644
--- a/dev-tools/maven/pom.xml.template
+++ b/dev-tools/maven/pom.xml.template
@@ -461,7 +461,7 @@
       <dependency>
         <groupId>com.carrotsearch.randomizedtesting</groupId>
         <artifactId>randomizedtesting-runner</artifactId>
-        <version>2.0.9</version>
+        <version>2.0.10</version>
       </dependency>
     </dependencies>
   </dependencyManagement>
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index 162a908..686300a 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -198,8 +198,6 @@
       'Ant-Version: Apache Ant 1.8',
       # Make sure .class files are 1.7 format:
       'X-Compile-Target-JDK: 1.7',
-      # Make sure this matches the version and svn revision we think we are releasing:
-      'Implementation-Version: %s %s ' % (version, svnRevision),
       'Specification-Version: %s' % version,
       # Make sure the release was compiled with 1.7:
       'Created-By: 1.7'):
@@ -207,6 +205,13 @@
         raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF' % \
                            (desc, verify))
 
+    if svnRevision != 'skip':
+      # Make sure this matches the version and svn revision we think we are releasing:
+      verifyRevision = 'Implementation-Version: %s %s ' % (version, svnRevision)
+      if s.find(verifyRevision) == -1:
+        raise RuntimeError('%s is missing "%s" inside its META-INF/MANIFEST.MF (wrong svn revision?)' % \
+                           (desc, verifyRevision))
+
     notice = decodeUTF8(z.read(NOTICE_FILE_NAME))
     license = decodeUTF8(z.read(LICENSE_FILE_NAME))
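
For reference, the attribute being checked lives in each jar's META-INF/MANIFEST.MF and can be inspected with plain java.util.jar; a small standalone sketch (the jar name is hypothetical):

    import java.util.jar.JarFile;
    import java.util.jar.Manifest;

    try (JarFile jar = new JarFile("lucene-core-4.4.0.jar")) { // hypothetical jar name
      Manifest mf = jar.getManifest();
      // a real release carries the version plus the svn revision it was built from;
      // passing "skip" as the revision argument now bypasses this comparison
      System.out.println(mf.getMainAttributes().getValue("Implementation-Version"));
    }
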
 
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e8915c6..a40ba38 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -69,8 +69,18 @@
   suggesters, you now need to call setPreservePositionIncrements(false) instead
   of configuring the token filters to not increment positions. (Adrien Grand)
 
+* LUCENE-3907: EdgeNGramTokenizer now supports maxGramSize > 1024, no longer
+  trims the input, sets a position increment of 1 for all tokens, and no
+  longer supports backward grams. (Adrien Grand)
+
+* LUCENE-3907: EdgeNGramTokenFilter no longer supports backward grams and no
+  longer updates offsets. (Adrien Grand)
+
 Bug Fixes
 
+* LUCENE-4997: Internal test framework's tests are sensitive to previous 
+  test failures and tests.failfast. (Dawid Weiss, Shai Erera)
+
 * LUCENE-4935: CustomScoreQuery wrongly applied its query boost twice 
   (boost^2).  (Robert Muir)
 
@@ -105,6 +115,28 @@
   
 * LUCENE-949: AnalyzingQueryParser can't work with leading wildcards.
   (Tim Allison, Robert Muir, Steve Rowe)
+
+* LUCENE-4980: Fix issues preventing mixing of RangeFacetRequest and
+  non-RangeFacetRequest when using DrillSideways.  (Mike McCandless,
+  Shai Erera)
+
+* LUCENE-4986: Fixed case where a newly opened near-real-time reader
+  fails to reflect a delete from IndexWriter.tryDeleteDocument (Reg,
+  Mike McCandless)
+  
+* LUCENE-4994: Fix PatternKeywordMarkerFilter to have public constructor.
+  (Uwe Schindler)
+  
+* LUCENE-4993: Fix BeiderMorseFilter to preserve custom attributes when
+  inserting tokens with position increment 0.  (Uwe Schindler)
+
+* LUCENE-4996: Ensure DocInverterPerField always includes field name
+  in exception messages.  (Markus Jelsma via Robert Muir)
+
+* LUCENE-4991: Fix handling of synonyms in classic QueryParser.getFieldQuery for 
+  terms not separated by whitespace. PositionIncrementAttribute was ignored, so with 
+  default AND synonyms wrongly became mandatory clauses, and with OR, the 
+  coordination factor was wrong.  (李威, Robert Muir)
   
 Optimizations
 
@@ -146,6 +178,25 @@
 * LUCENE-4965: Add dynamic (no taxonomy index used) numeric range
   faceting to Lucene's facet module (Mike McCandless, Shai Erera)
 
+* LUCENE-4979: LiveFieldValues can work with any ReferenceManager, not
+  just ReferenceManager<IndexSearcher>. (Mike McCandless)
+
+* LUCENE-4975: Added a new Replicator module which can replicate index 
+  revisions between server and client. (Shai Erera, Mike McCandless)
+
+Build
+
+* LUCENE-4987: Upgrade randomized testing to version 2.0.10: the test
+  framework could fail internally due to overly aggressive J9 optimizations.
+  (Dawid Weiss, Shai Erera)
+
+
+======================= Lucene 4.3.1 =======================
+
+Bug Fixes
+
+
+
 ======================= Lucene 4.3.0 =======================
 
 Changes in backwards compatibility policy
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternKeywordMarkerFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternKeywordMarkerFilter.java
index 886f19f..2e055bb 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternKeywordMarkerFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternKeywordMarkerFilter.java
@@ -42,7 +42,7 @@
    * @param pattern
    *          the pattern to apply to the incoming term buffer
    **/
-  protected PatternKeywordMarkerFilter(TokenStream in, Pattern pattern) {
+  public PatternKeywordMarkerFilter(TokenStream in, Pattern pattern) {
     super(in);
     this.matcher = pattern.matcher("");
   }
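
Now that the constructor is public, the filter can be dropped into an analysis chain directly; a minimal sketch (the pattern and sample text are illustrative):

    import java.io.StringReader;
    import java.util.regex.Pattern;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.en.PorterStemFilter;
    import org.apache.lucene.util.Version;

    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_43, new StringReader("running Lucene"));
    ts = new PatternKeywordMarkerFilter(ts, Pattern.compile("[A-Z].*")); // mark capitalized terms
    ts = new PorterStemFilter(ts); // stemmers honor KeywordAttribute:
                                   // "running" becomes "run", "Lucene" is left intact
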
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java
index 8b3d269..f464724 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java
@@ -27,21 +27,19 @@
  * &lt;fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
  *     &lt;tokenizer class="solr.WhitespaceTokenizerFactory"/&gt;
- *     &lt;filter class="solr.EdgeNGramFilterFactory" side="front" minGramSize="1" maxGramSize="1"/&gt;
+ *     &lt;filter class="solr.EdgeNGramFilterFactory" minGramSize="1" maxGramSize="1"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  */
 public class EdgeNGramFilterFactory extends TokenFilterFactory {
   private final int maxGramSize;
   private final int minGramSize;
-  private final String side;
 
   /** Creates a new EdgeNGramFilterFactory */
   public EdgeNGramFilterFactory(Map<String, String> args) {
     super(args);
     minGramSize = getInt(args, "minGramSize", EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE);
     maxGramSize = getInt(args, "maxGramSize", EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
-    side = get(args, "side", EdgeNGramTokenFilter.Side.FRONT.getLabel());
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -49,6 +47,6 @@
 
   @Override
   public EdgeNGramTokenFilter create(TokenStream input) {
-    return new EdgeNGramTokenFilter(input, side, minGramSize, maxGramSize);
+    return new EdgeNGramTokenFilter(luceneMatchVersion, input, minGramSize, maxGramSize);
   }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
index 788d056..10aaf16 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
@@ -17,63 +17,31 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-
-import java.io.IOException;
+import org.apache.lucene.util.Version;
 
 /**
  * Tokenizes the given token into n-grams of given size(s).
  * <p>
- * This {@link TokenFilter} create n-grams from the beginning edge or ending edge of a input token.
- * </p>
+ * This {@link TokenFilter} creates n-grams from the beginning edge of an input token.
  */
 public final class EdgeNGramTokenFilter extends TokenFilter {
-  public static final Side DEFAULT_SIDE = Side.FRONT;
   public static final int DEFAULT_MAX_GRAM_SIZE = 1;
   public static final int DEFAULT_MIN_GRAM_SIZE = 1;
 
-  /** Specifies which side of the input the n-gram should be generated from */
-  public static enum Side {
-
-    /** Get the n-gram from the front of the input */
-    FRONT {
-      @Override
-      public String getLabel() { return "front"; }
-    },
-
-    /** Get the n-gram from the end of the input */
-    BACK  {
-      @Override
-      public String getLabel() { return "back"; }
-    };
-
-    public abstract String getLabel();
-
-    // Get the appropriate Side from a string
-    public static Side getSide(String sideName) {
-      if (FRONT.getLabel().equals(sideName)) {
-        return FRONT;
-      }
-      if (BACK.getLabel().equals(sideName)) {
-        return BACK;
-      }
-      return null;
-    }
-  }
-
   private final int minGram;
   private final int maxGram;
-  private Side side;
   private char[] curTermBuffer;
   private int curTermLength;
   private int curGramSize;
   private int tokStart;
   private int tokEnd; // only used if the length changed before this filter
-  private boolean hasIllegalOffsets; // only if the length changed before this filter
   private int savePosIncr;
   private boolean isFirstToken = true;
   
@@ -84,16 +52,16 @@
   /**
    * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
    *
+   * @param version the Lucene match version
    * @param input {@link TokenStream} holding the input to be tokenized
-   * @param side the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenFilter(TokenStream input, Side side, int minGram, int maxGram) {
+  public EdgeNGramTokenFilter(Version version, TokenStream input, int minGram, int maxGram) {
     super(input);
 
-    if (side == null) {
-      throw new IllegalArgumentException("sideLabel must be either front or back");
+    if (version == null) {
+      throw new IllegalArgumentException("version must not be null");
     }
 
     if (minGram < 1) {
@@ -106,19 +74,6 @@
 
     this.minGram = minGram;
     this.maxGram = maxGram;
-    this.side = side;
-  }
-
-  /**
-   * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
-   *
-   * @param input {@link TokenStream} holding the input to be tokenized
-   * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-   * @param minGram the smallest n-gram to generate
-   * @param maxGram the largest n-gram to generate
-   */
-  public EdgeNGramTokenFilter(TokenStream input, String sideLabel, int minGram, int maxGram) {
-    this(input, Side.getSide(sideLabel), minGram, maxGram);
   }
 
   @Override
@@ -133,23 +88,14 @@
           curGramSize = minGram;
           tokStart = offsetAtt.startOffset();
           tokEnd = offsetAtt.endOffset();
-          // if length by start + end offsets doesn't match the term text then assume
-          // this is a synonym and don't adjust the offsets.
-          hasIllegalOffsets = (tokStart + curTermLength) != tokEnd;
           savePosIncr = posIncrAtt.getPositionIncrement();
         }
       }
       if (curGramSize <= maxGram) {         // if we have hit the end of our n-gram size range, quit
         if (curGramSize <= curTermLength) { // if the remaining input is too short, we can't generate any n-grams
           // grab gramSize chars from front or back
-          int start = side == Side.FRONT ? 0 : curTermLength - curGramSize;
-          int end = start + curGramSize;
           clearAttributes();
-          if (hasIllegalOffsets) {
-            offsetAtt.setOffset(tokStart, tokEnd);
-          } else {
-            offsetAtt.setOffset(tokStart + start, tokStart + end);
-          }
+          offsetAtt.setOffset(tokStart, tokEnd);
           // first ngram gets increment, others don't
           if (curGramSize == minGram) {
             //  Leave the first token position increment at the cleared-attribute value of 1
@@ -159,7 +105,7 @@
           } else {
             posIncrAtt.setPositionIncrement(0);
           }
-          termAtt.copyBuffer(curTermBuffer, start, curGramSize);
+          termAtt.copyBuffer(curTermBuffer, 0, curGramSize);
           curGramSize++;
           isFirstToken = false;
           return true;
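
In short, the rewritten filter keeps the original token's offsets on every gram and only ever produces front grams; a sketch of the new contract (the Version constant is just for illustration):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.util.Version;

    TokenStream grams = new EdgeNGramTokenFilter(Version.LUCENE_43,
        new WhitespaceTokenizer(Version.LUCENE_43, new StringReader("abcde")), 1, 3);
    // emits "a", "ab", "abc"; each gram reports the whole token's offsets [0,5),
    // matching the updated expectations in EdgeNGramTokenFilterTest
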
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
index ba2c56e..e41d940 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
@@ -24,118 +24,60 @@
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Version;
 
 /**
  * Tokenizes the input from an edge into n-grams of given size(s).
  * <p>
- * This {@link Tokenizer} create n-grams from the beginning edge or ending edge of a input token.
- * MaxGram can't be larger than 1024 because of limitation.
- * </p>
+ * This {@link Tokenizer} creates n-grams from the beginning edge of an input token.
  */
 public final class EdgeNGramTokenizer extends Tokenizer {
-  public static final Side DEFAULT_SIDE = Side.FRONT;
   public static final int DEFAULT_MAX_GRAM_SIZE = 1;
   public static final int DEFAULT_MIN_GRAM_SIZE = 1;
-  
+
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
 
-  /** Specifies which side of the input the n-gram should be generated from */
-  public static enum Side {
-
-    /** Get the n-gram from the front of the input */
-    FRONT {
-      @Override
-      public String getLabel() { return "front"; }
-    },
-
-    /** Get the n-gram from the end of the input */
-    BACK  {
-      @Override
-      public String getLabel() { return "back"; }
-    };
-
-    public abstract String getLabel();
-
-    // Get the appropriate Side from a string
-    public static Side getSide(String sideName) {
-      if (FRONT.getLabel().equals(sideName)) {
-        return FRONT;
-      }
-      if (BACK.getLabel().equals(sideName)) {
-        return BACK;
-      }
-      return null;
-    }
-  }
-
   private int minGram;
   private int maxGram;
   private int gramSize;
-  private Side side;
   private boolean started;
   private int inLen; // length of the input AFTER trim()
   private int charsRead; // length of the input
   private String inStr;
 
-
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
+   * @param version the Lucene match version
    * @param input {@link Reader} holding the input to be tokenized
-   * @param side the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(Reader input, Side side, int minGram, int maxGram) {
+  public EdgeNGramTokenizer(Version version, Reader input, int minGram, int maxGram) {
     super(input);
-    init(side, minGram, maxGram);
+    init(version, minGram, maxGram);
   }
 
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
-   * 
-   * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
-   * @param input {@link Reader} holding the input to be tokenized
-   * @param side the {@link Side} from which to chop off an n-gram
-   * @param minGram the smallest n-gram to generate
-   * @param maxGram the largest n-gram to generate
-   */
-  public EdgeNGramTokenizer(AttributeFactory factory, Reader input, Side side, int minGram, int maxGram) {
-    super(factory, input);
-    init(side, minGram, maxGram);
-  }
-  
-  /**
-   * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
-   * @param input {@link Reader} holding the input to be tokenized
-   * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
-   * @param minGram the smallest n-gram to generate
-   * @param maxGram the largest n-gram to generate
-   */
-  public EdgeNGramTokenizer(Reader input, String sideLabel, int minGram, int maxGram) {
-    this(input, Side.getSide(sideLabel), minGram, maxGram);
-  }
-
-  /**
-   * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
-   * 
+   * @param version the Lucene match version
    * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
    * @param input {@link Reader} holding the input to be tokenized
-   * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(AttributeFactory factory, Reader input, String sideLabel, int minGram, int maxGram) {
-    this(factory, input, Side.getSide(sideLabel), minGram, maxGram);
+  public EdgeNGramTokenizer(Version version, AttributeFactory factory, Reader input, int minGram, int maxGram) {
+    super(factory, input);
+    init(version, minGram, maxGram);
   }
-  
-  private void init(Side side, int minGram, int maxGram) {
-    if (side == null) {
-      throw new IllegalArgumentException("sideLabel must be either front or back");
+
+  private void init(Version version, int minGram, int maxGram) {
+    if (version == null) {
+      throw new IllegalArgumentException("version must not be null");
     }
 
     if (minGram < 1) {
@@ -148,7 +90,6 @@
 
     this.minGram = minGram;
     this.maxGram = maxGram;
-    this.side = side;
   }
 
   /** Returns the next token in the stream, or null at EOS. */
@@ -159,20 +100,25 @@
     if (!started) {
       started = true;
       gramSize = minGram;
-      char[] chars = new char[1024];
+      char[] chars = new char[Math.min(1024, maxGram)];
       charsRead = 0;
       // TODO: refactor to a shared readFully somewhere:
-      while (charsRead < chars.length) {
+      boolean exhausted = false;
+      while (charsRead < maxGram) {
         final int inc = input.read(chars, charsRead, chars.length-charsRead);
         if (inc == -1) {
+          exhausted = true;
           break;
         }
         charsRead += inc;
+        if (charsRead == chars.length && charsRead < maxGram) {
+          chars = ArrayUtil.grow(chars);
+        }
       }
 
-      inStr = new String(chars, 0, charsRead).trim();  // remove any trailing empty strings 
+      inStr = new String(chars, 0, charsRead);
 
-      if (charsRead == chars.length) {
+      if (!exhausted) {
         // Read extra throwaway chars so that on end() we
         // report the correct offset:
         char[] throwaway = new char[1024];
@@ -191,7 +137,7 @@
       }
       posIncrAtt.setPositionIncrement(1);
     } else {
-      posIncrAtt.setPositionIncrement(0);
+      posIncrAtt.setPositionIncrement(1);
     }
 
     // if the remaining input is too short, we can't generate any n-grams
@@ -200,15 +146,13 @@
     }
 
     // if we have hit the end of our n-gram size range, quit
-    if (gramSize > maxGram) {
+    if (gramSize > maxGram || gramSize > inLen) {
       return false;
     }
 
     // grab gramSize chars from front or back
-    int start = side == Side.FRONT ? 0 : inLen - gramSize;
-    int end = start + gramSize;
-    termAtt.setEmpty().append(inStr, start, end);
-    offsetAtt.setOffset(correctOffset(start), correctOffset(end));
+    termAtt.setEmpty().append(inStr, 0, gramSize);
+    offsetAtt.setOffset(correctOffset(0), correctOffset(gramSize));
     gramSize++;
     return true;
   }
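
The tokenizer side of LUCENE-3907 in a nutshell: every gram now advances the position by 1, offsets grow with the gram, the read buffer grows on demand so maxGram is no longer capped at 1024, and the input is consumed as-is rather than trimmed. A short sketch (Version constant for illustration):

    import java.io.StringReader;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.util.Version;

    Tokenizer tok = new EdgeNGramTokenizer(Version.LUCENE_43, new StringReader("abcde"), 1, 3);
    // emits "a" [0,1), "ab" [0,2), "abc" [0,3), each with positionIncrement = 1
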
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java
index 5a7f83a..9104262 100755
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java
@@ -28,21 +28,19 @@
  * <pre class="prettyprint">
  * &lt;fieldType name="text_edgngrm" class="solr.TextField" positionIncrementGap="100"&gt;
  *   &lt;analyzer&gt;
- *     &lt;tokenizer class="solr.EdgeNGramTokenizerFactory" side="front" minGramSize="1" maxGramSize="1"/&gt;
+ *     &lt;tokenizer class="solr.EdgeNGramTokenizerFactory" minGramSize="1" maxGramSize="1"/&gt;
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  */
 public class EdgeNGramTokenizerFactory extends TokenizerFactory {
   private final int maxGramSize;
   private final int minGramSize;
-  private final String side;
 
   /** Creates a new EdgeNGramTokenizerFactory */
   public EdgeNGramTokenizerFactory(Map<String, String> args) {
     super(args);
     minGramSize = getInt(args, "minGramSize", EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE);
     maxGramSize = getInt(args, "maxGramSize", EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
-    side = get(args, "side", EdgeNGramTokenFilter.Side.FRONT.getLabel());
     if (!args.isEmpty()) {
       throw new IllegalArgumentException("Unknown parameters: " + args);
     }
@@ -50,6 +48,6 @@
   
   @Override
   public EdgeNGramTokenizer create(AttributeFactory factory, Reader input) {
-    return new EdgeNGramTokenizer(factory, input, side, minGramSize, maxGramSize);
+    return new EdgeNGramTokenizer(luceneMatchVersion, factory, input, minGramSize, maxGramSize);
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index 8e9e810..4baefbc 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -153,17 +153,6 @@
           // Not broken: we forcefully add this, so we shouldn't
           // also randomly pick it:
           ValidatingTokenFilter.class,
-          // NOTE: these by themselves won't cause any 'basic assertions' to fail.
-          // but see https://issues.apache.org/jira/browse/LUCENE-3920, if any 
-          // tokenfilter that combines words (e.g. shingles) comes after them,
-          // this will create bogus offsets because their 'offsets go backwards',
-          // causing shingle or whatever to make a single token with a 
-          // startOffset thats > its endOffset
-          // (see LUCENE-3738 for a list of other offenders here)
-          // broken!
-          EdgeNGramTokenizer.class,
-          // broken!
-          EdgeNGramTokenFilter.class,
           // broken!
           WordDelimiterFilter.class)) {
         for (Constructor<?> ctor : c.getConstructors()) {
@@ -195,6 +184,8 @@
           CJKBigramFilter.class,
           // TODO: doesn't handle graph inputs (or even look at positionIncrement)
           HyphenatedWordsFilter.class,
+          // TODO: LUCENE-4983
+          CommonGramsFilter.class,
           // TODO: doesn't handle graph inputs
           CommonGramsQueryFilter.class)) {
         for (Constructor<?> ctor : c.getConstructors()) {
@@ -440,20 +431,6 @@
         }
       }
     });
-    put(EdgeNGramTokenizer.Side.class, new ArgProducer() {
-      @Override public Object create(Random random) {
-        return random.nextBoolean() 
-            ? EdgeNGramTokenizer.Side.FRONT 
-            : EdgeNGramTokenizer.Side.BACK;
-      }
-    });
-    put(EdgeNGramTokenFilter.Side.class, new ArgProducer() {
-      @Override public Object create(Random random) {
-        return random.nextBoolean() 
-            ? EdgeNGramTokenFilter.Side.FRONT 
-            : EdgeNGramTokenFilter.Side.BACK;
-      }
-    });
     put(HyphenationTree.class, new ArgProducer() {
       @Override public Object create(Random random) {
         // TODO: make nastier
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
index 6a76cc6..6139323 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
@@ -19,13 +19,11 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.analysis.position.PositionFilter;
 
 import java.io.Reader;
@@ -47,7 +45,7 @@
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 0, 0);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -57,7 +55,7 @@
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 2, 1);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -67,7 +65,7 @@
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, -1, 2);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -75,45 +73,27 @@
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1});
-  }
-
-  public void testBackUnigram() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5});
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 1, 1);
+    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{5});
   }
 
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
-  }
-
-  public void testBackRangeOfNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 3);
-    assertTokenStreamContents(tokenizer,
-                              new String[]{"e","de","cde"},
-                              new int[]{4,3,2},
-                              new int[]{5,5,5},
-                              null,
-                              null,
-                              null,
-                              null,
-                              false);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 1, 3);
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
   }
 
   public void testFilterPositions() throws Exception {
     TokenStream ts = new MockTokenizer(new StringReader("abcde vwxyz"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, 1, 3);
     assertTokenStreamContents(tokenizer,
                               new String[]{"a","ab","abc","v","vw","vwx"},
                               new int[]{0,0,0,6,6,6},
-                              new int[]{1,2,3,7,8,9},
+                              new int[]{5,5,5,11,11,11},
                               null,
                               new int[]{1,0,0,1,0,0},
                               null,
@@ -124,63 +104,30 @@
   public void testFirstTokenPositionIncrement() throws Exception {
     TokenStream ts = new MockTokenizer(new StringReader("a abc"), MockTokenizer.WHITESPACE, false);
     ts = new PositionFilter(ts, 0); // All but first token will get 0 position increment
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, 2, 3);
     // The first token "a" will not be output, since it's smaller than the mingram size of 2.
     // The second token on input to EdgeNGramTokenFilter will have position increment of 0,
     // which should be increased to 1, since this is the first output token in the stream.
     assertTokenStreamContents(filter,
         new String[] { "ab", "abc" },
         new int[]    {    2,     2 },
-        new int[]    {    4,     5 },
+        new int[]    {    5,     5 },
         new int[]    {    1,     0 }
     );
   }
-
-  public void testTokenizerPositions() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer,
-                              new String[]{"a","ab","abc"},
-                              new int[]{0,0,0},
-                              new int[]{1,2,3},
-                              null,
-                              new int[]{1,0,0},
-                              null,
-                              null,
-                              false);
-  }
   
   public void testSmallTokenInStream() throws Exception {
     input = new MockTokenizer(new StringReader("abc de fgh"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, 3, 3);
     assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
   }
   
   public void testReset() throws Exception {
     WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, 1, 3);
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
     tokenizer.setReader(new StringReader("abcde"));
-    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
-  }
-  
-  // LUCENE-3642
-  // EdgeNgram blindly adds term length to offset, but this can take things out of bounds
-  // wrt original text if a previous filter increases the length of the word (in this case æ -> ae)
-  // so in this case we behave like WDF, and preserve any modified offsets
-  public void testInvalidOffsets() throws Exception {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
-        filters = new EdgeNGramTokenFilter(filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
-        return new TokenStreamComponents(tokenizer, filters);
-      }
-    };
-    assertAnalyzesTo(analyzer, "mosfellsbær",
-        new String[] { "mo", "mos", "mosf", "mosfe", "mosfel", "mosfell", "mosfells", "mosfellsb", "mosfellsba", "mosfellsbae", "mosfellsbaer" },
-        new int[]    {    0,     0,      0,       0,        0,         0,          0,           0,            0,             0,              0 },
-        new int[]    {   11,    11,     11,      11,       11,        11,         11,          11,           11,            11,             11 });
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
   }
   
   /** blast some random strings through the analyzer */
@@ -190,20 +137,10 @@
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
         return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 4));
+            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, 2, 4));
       }    
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
-    
-    Analyzer b = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-        return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 4));
-      }    
-    };
-    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER, 20, false, false);
   }
   
   public void testEmptyTerm() throws Exception {
@@ -213,19 +150,9 @@
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
         return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15));
+            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, 2, 15));
       }    
     };
     checkAnalysisConsistency(random, a, random.nextBoolean(), "");
-    
-    Analyzer b = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new KeywordTokenizer(reader);
-        return new TokenStreamComponents(tokenizer, 
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 15));
-      }    
-    };
-    checkAnalysisConsistency(random, b, random.nextBoolean(), "");
   }
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
index a3a3ad1..4db7efe 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
@@ -18,13 +18,17 @@
  */
 
 
+import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util._TestUtil;
 
 /**
  * Tests {@link EdgeNGramTokenizer} for correctness.
@@ -41,7 +45,7 @@
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 0, 0);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -51,7 +55,7 @@
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 2, 1);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -61,7 +65,7 @@
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
     try {        
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, -1, 2);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -69,32 +73,22 @@
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 1);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1}, 5 /* abcde */);
   }
 
-  public void testBackUnigram() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5}, 5 /* abcde */);
-  }
-
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 6, 6);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
   }
-
-  public void testBackRangeOfNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5}, null, null, null, 5 /* abcde */, false);
-  }
   
   public void testReset() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
@@ -105,21 +99,46 @@
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.FRONT, 2, 4);
+        Tokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, reader, 2, 4);
         return new TokenStreamComponents(tokenizer, tokenizer);
       }    
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER, 20, false, false);
     checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 8192, false, false);
-    
-    Analyzer b = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.BACK, 2, 4);
-        return new TokenStreamComponents(tokenizer, tokenizer);
-      }    
-    };
-    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER, 20, false, false);
-    checkRandomData(random(), b, 100*RANDOM_MULTIPLIER, 8192, false, false);
   }
+
+  public void testTokenizerPositions() throws Exception {
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"), 1, 3);
+    assertTokenStreamContents(tokenizer,
+                              new String[]{"a","ab","abc"},
+                              new int[]{0,0,0},
+                              new int[]{1,2,3},
+                              null,
+                              new int[]{1,1,1},
+                              null,
+                              null,
+                              false);
+  }
+
+  public void testLargeInput() throws IOException {
+    final String input = _TestUtil.randomSimpleString(random(), 1024 * 5);
+    final int minGram = _TestUtil.nextInt(random(), 1, 1024);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 5 * 1024);
+    EdgeNGramTokenizer tk = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, new StringReader(input), minGram, maxGram);
+    final CharTermAttribute charTermAtt = tk.addAttribute(CharTermAttribute.class);
+    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
+    final PositionIncrementAttribute posIncAtt = tk.addAttribute(PositionIncrementAttribute.class);
+    tk.reset();
+    for (int i = minGram; i <= maxGram && i <= input.length(); ++i) {
+      assertTrue(tk.incrementToken());
+      assertEquals(0, offsetAtt.startOffset());
+      assertEquals(i, offsetAtt.endOffset());
+      assertEquals(1, posIncAtt.getPositionIncrement());
+      assertEquals(input.substring(0, i), charTermAtt.toString());
+    }
+    assertFalse(tk.incrementToken());
+    tk.end();
+    assertEquals(input.length(), offsetAtt.startOffset());
+  }
+
 }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java
index 2256af6..47829cb 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java
@@ -97,17 +97,6 @@
   }
 
   /**
-   * Test EdgeNGramTokenizerFactory with side option
-   */
-  public void testEdgeNGramTokenizer3() throws Exception {
-    Reader reader = new StringReader("ready");
-    TokenStream stream = tokenizerFactory("EdgeNGram",
-        "side", "back").create(reader);
-    assertTokenStreamContents(stream, 
-        new String[] { "y" });
-  }
-
-  /**
    * Test EdgeNGramFilterFactory
    */
   public void testEdgeNGramFilter() throws Exception {
@@ -130,18 +119,6 @@
     assertTokenStreamContents(stream, 
         new String[] { "t", "te" });
   }
-
-  /**
-   * Test EdgeNGramFilterFactory with side option
-   */
-  public void testEdgeNGramFilter3() throws Exception {
-    Reader reader = new StringReader("ready");
-    TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    stream = tokenFilterFactory("EdgeNGram",
-        "side", "back").create(stream);
-    assertTokenStreamContents(stream, 
-        new String[] { "y" });
-  }
   
   /** Test that bogus arguments result in exception */
   public void testBogusArguments() throws Exception {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
index ee32226..746d1fd 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
@@ -72,14 +72,23 @@
     }
     return factory;
   }
-  
+
   /** 
    * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments.
    * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
    * be on the test classpath.
    */
   protected TokenizerFactory tokenizerFactory(String name, String... keysAndValues) throws Exception {
-    return tokenizerFactory(name, TEST_VERSION_CURRENT, new ClasspathResourceLoader(getClass()), keysAndValues);
+    return tokenizerFactory(name, TEST_VERSION_CURRENT, keysAndValues);
+  }
+
+  /** 
+   * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments.
+   * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
+   * be on the test classpath.
+   */
+  protected TokenizerFactory tokenizerFactory(String name, Version version, String... keysAndValues) throws Exception {
+    return tokenizerFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues);
   }
   
   /** 
@@ -89,14 +98,23 @@
   protected TokenizerFactory tokenizerFactory(String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception {
     return (TokenizerFactory) analysisFactory(TokenizerFactory.lookupClass(name), matchVersion, loader, keysAndValues);
   }
-  
+
+  /** 
+   * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments.
+   * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
+   * be on the test classpath.
+   */
+  protected TokenFilterFactory tokenFilterFactory(String name, Version version, String... keysAndValues) throws Exception {
+    return tokenFilterFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues);
+  }
+
   /** 
    * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments.
    * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
    * be on the test classpath.
    */
   protected TokenFilterFactory tokenFilterFactory(String name, String... keysAndValues) throws Exception {
-    return tokenFilterFactory(name, TEST_VERSION_CURRENT, new ClasspathResourceLoader(getClass()), keysAndValues);
+    return tokenFilterFactory(name, TEST_VERSION_CURRENT, keysAndValues);
   }
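
The new overloads let a test pin the match version without wiring up a ResourceLoader, which is handy for exercising version-dependent factory behavior; e.g. (factory name and arguments illustrative):

    TokenizerFactory f = tokenizerFactory("EdgeNGram", Version.LUCENE_43,
        "minGramSize", "2", "maxGramSize", "4");
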
   
   /** 
diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilter.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilter.java
index 0461564..ba5c595 100644
--- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilter.java
+++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilter.java
@@ -27,7 +27,6 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 
 /**
@@ -48,13 +47,11 @@
   private final Matcher matcher = pattern.matcher("");
   // encoded representation
   private String encoded;
-  // offsets for any buffered outputs
-  private int startOffset;
-  private int endOffset;
+  // preserves all attributes for any buffered outputs
+  private State state;
   
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   
   
   /**
@@ -83,10 +80,10 @@
   @Override
   public boolean incrementToken() throws IOException {
     if (matcher.find()) {
-      clearAttributes();
+      assert state != null && encoded != null;
+      restoreState(state);
       termAtt.setEmpty().append(encoded, matcher.start(1), matcher.end(1));
       posIncAtt.setPositionIncrement(0);
-      offsetAtt.setOffset(startOffset, endOffset);
       return true;
     }
     
@@ -94,8 +91,7 @@
       encoded = (languages == null) 
           ? engine.encode(termAtt.toString())
           : engine.encode(termAtt.toString(), languages);
-      startOffset = offsetAtt.startOffset();
-      endOffset = offsetAtt.endOffset();
+      state = captureState();
       matcher.reset(encoded);
       if (matcher.find()) {
         termAtt.setEmpty().append(encoded, matcher.start(1), matcher.end(1));
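
The fix swaps per-attribute bookkeeping for the capture/restore idiom; a minimal self-contained illustration (a hypothetical filter, not part of this patch) of why that preserves custom attributes on buffered outputs:

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

    public final class DuplicatingFilter extends TokenFilter {
      private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
      private State saved;

      public DuplicatingFilter(TokenStream in) {
        super(in);
      }

      @Override
      public boolean incrementToken() throws IOException {
        if (saved != null) {                 // emit the buffered duplicate
          restoreState(saved);               // restores *every* attribute, custom ones included
          posIncAtt.setPositionIncrement(0); // stack it at the same position
          saved = null;
          return true;
        }
        if (!input.incrementToken()) {
          return false;
        }
        saved = captureState();              // remember this token for the next call
        return true;
      }

      @Override
      public void reset() throws IOException {
        super.reset();
        saved = null;
      }
    }
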
diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
index da8fe51..b4c77a9 100644
--- a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
+++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java
@@ -19,7 +19,9 @@
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.StringReader;
 import java.util.HashSet;
+import java.util.regex.Pattern;
 
 import org.apache.commons.codec.language.bm.NameType;
 import org.apache.commons.codec.language.bm.PhoneticEngine;
@@ -29,7 +31,10 @@
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.miscellaneous.PatternKeywordMarkerFilter;
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 import org.junit.Ignore;
 
 /** Tests {@link BeiderMorseFilter} */
@@ -103,4 +108,20 @@
     };
     checkOneTermReuse(a, "", "");
   }
+  
+  public void testCustomAttribute() throws IOException {
+    TokenStream stream = new KeywordTokenizer(new StringReader("D'Angelo"));
+    stream = new PatternKeywordMarkerFilter(stream, Pattern.compile(".*"));
+    stream = new BeiderMorseFilter(stream, new PhoneticEngine(NameType.GENERIC, RuleType.EXACT, true));
+    KeywordAttribute keyAtt = stream.addAttribute(KeywordAttribute.class);
+    stream.reset();
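+    // every phonetic form emitted must keep the keyword flag that
+    // PatternKeywordMarkerFilter set upstream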
+    int i = 0;
+    while(stream.incrementToken()) {
+      assertTrue(keyAtt.isKeyword());
+      i++;
+    }
+    assertEquals(12, i);
+    stream.end();
+    stream.close();
+  }
 }
diff --git a/lucene/build.xml b/lucene/build.xml
index e0af8ff..0c16b7f 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -160,7 +160,13 @@
   </target>
 
   <target name="check-licenses" depends="compile-tools,resolve,load-custom-tasks" description="Validate license stuff.">
-    <license-check-macro dir="${basedir}" licensedir="${common.dir}/licenses" />
+    <license-check-macro dir="${basedir}" licensedir="${common.dir}/licenses">
+      <additional-filters>
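+        <!-- strip version suffixes from jar names so one license file covers all versions of a dependency -->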
+        <replaceregex pattern="jetty([^/]+)$" replace="jetty" flags="gi" />
+        <replaceregex pattern="slf4j-([^/]+)$" replace="slf4j" flags="gi" />
+        <replaceregex pattern="(bcmail|bcprov)-([^/]+)$" replace="\1" flags="gi" />
+      </additional-filters>
+    </license-check-macro>
   </target>
 
   <target name="check-forbidden-apis" depends="compile-tools,compile-test,install-forbidden-apis,-forbidden-apis-classpath,-check-forbidden-jdk-apis,-check-forbidden-test-apis,-check-system-out" description="Check forbidden API calls in compiled class files"/>
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
index 46890c4..ede717e 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
@@ -18,9 +18,14 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.junit.Test;
 
 import java.io.Reader;
@@ -46,8 +51,8 @@
   private class NGramAnalyzer extends Analyzer {
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      return new TokenStreamComponents(new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.BACK,
-          10, 20));
+      final Tokenizer tokenizer = new KeywordTokenizer(reader);
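+      // emulate back-side edge n-grams: reverse the input, take front edge
+      // n-grams, then reverse each gram back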
+      return new TokenStreamComponents(tokenizer, new ReverseStringFilter(
+          TEST_VERSION_CURRENT, new EdgeNGramTokenFilter(TEST_VERSION_CURRENT,
+              new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer), 10, 20)));
     }
   }
 
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java
index 20122fd..a73b5e5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java
@@ -88,13 +88,16 @@
         upto++;
       }
     }
-    assert upto == offsets.length;
+    //assert upto == offsets.length;
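+    // (disabled: with stacked segment updates the offsets array may not be
+    // fully filled)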
   }
   
   @Override
   public void visitDocument(int n, StoredFieldVisitor visitor, Set<String> ignoreFields) throws IOException {
     in.seek(offsets[n]);
     readLine();
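+    // a document with no stored fields in this generation has no NUM line;
+    // in that case there is nothing to visit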
+    if (!StringHelper.startsWith(scratch, NUM)) {
+      return;
+    }
     assert StringHelper.startsWith(scratch, NUM);
     int numFields = parseIntAt(NUM.length);
     
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
index 00cc4f4..749b2e9 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
@@ -165,6 +165,9 @@
     if (docID < 0 || docID >= maxDoc) {
       throw new IllegalArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);
     }
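+    // an index with no chunks has no start pointers; return -1 so callers
+    // can skip documents that have no stored fields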
+    if (docBases.length == 0) {
+      return -1;
+    }
     final int block = block(docID);
     final int relativeChunk = relativeChunk(block, docID - docBases[block]);
     return startPointers[block] + relativeStartPointer(block, relativeChunk);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
index 1a9a973..56a935e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
@@ -194,7 +194,11 @@
   @Override
   public void visitDocument(int docID, StoredFieldVisitor visitor, Set<String> ignoreFields)
       throws IOException {
-    fieldsStream.seek(indexReader.getStartPointer(docID));
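+    // a start pointer of -1 means the stored fields index holds no chunks
+    // (see CompressingStoredFieldsIndexReader); nothing to visit then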
+    long startPointer = indexReader.getStartPointer(docID);
+    if (startPointer < 0) {
+      return;
+    }
+    fieldsStream.seek(startPointer);
 
     final int docBase = fieldsStream.readVInt();
     final int chunkDocs = fieldsStream.readVInt();
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java
index 2df6749..db3787d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java
@@ -90,8 +90,9 @@
       assert HEADER_LENGTH_IDX == indexStream.getFilePointer();
       final long indexSize = indexStream.length() - HEADER_LENGTH_IDX;
       this.size = (int) (indexSize >> 3);
-      // Verify two sources of "maxDoc" agree:
-      if (this.size != si.getDocCount()) {
+      // Verify two sources of "maxDoc" agree, but for stacked segments allow
+      // fewer actual documents:
+      if (this.size > si.getDocCount()) {
         throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.getDocCount());
       }
       numTotalDocs = (int) (indexSize >> 3);
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java
index 4465bc9..12b48f4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -470,7 +470,7 @@
 
   private synchronized boolean applyTermUpdates(
       SortedSet<FieldsUpdate> packetUpdates, ReadersAndLiveDocs rld,
-      SegmentReader reader, boolean checkDocId) throws IOException {
+      SegmentReader reader, boolean exactSegment) throws IOException {
     Fields fields = reader.fields();
     if (fields == null) {
       // This reader has no postings
@@ -479,17 +479,8 @@
     
     assert checkDeleteTerm(null);
 
-    UpdatedSegmentData updatedSegmentData = new UpdatedSegmentData();
-    
-    for (FieldsUpdate update : packetUpdates) {
-      DocsEnum docsEnum = reader.termDocsEnum(update.term);
-      if (docsEnum != null) {
-        int docID;
-        while ((docID = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-          updatedSegmentData.addUpdate(docID, update, checkDocId);
-        }
-      }
-    }
+    UpdatedSegmentData updatedSegmentData = new UpdatedSegmentData(reader,
+        packetUpdates, exactSegment);
     
     if (updatedSegmentData.hasUpdates()) {
       rld.setLiveUpdates(updatedSegmentData);
diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
index 2e84744..788b157 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -327,7 +327,7 @@
     // corrupt first commit, but it's too deadly to make
     // this logic "smarter" and risk accidentally returning
     // false due to various cases like file description
-    // exhaustion, access denited, etc., because in that
+    // exhaustion, access denied, etc., because in that
     // case IndexWriter may delete the entire index.  It's
     // safer to err towards "index exists" than try to be
     // smart about detecting not-yet-fully-committed or
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java b/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
index d8f12ca..0285e24 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
@@ -120,10 +120,10 @@
 
               final int posIncr = posIncrAttribute.getPositionIncrement();
               if (posIncr < 0) {
-                throw new IllegalArgumentException("position increment must be >=0 (got " + posIncr + ")");
+                throw new IllegalArgumentException("position increment must be >=0 (got " + posIncr + ") for field '" + field.name() + "'");
               }
               if (fieldState.position == 0 && posIncr == 0) {
-                throw new IllegalArgumentException("first position increment must be > 0 (got 0)");
+                throw new IllegalArgumentException("first position increment must be > 0 (got 0) for field '" + field.name() + "'");
               }
               int position = fieldState.position + posIncr;
               if (position > 0) {
@@ -146,11 +146,11 @@
                 int endOffset = fieldState.offset + offsetAttribute.endOffset();
                 if (startOffset < 0 || endOffset < startOffset) {
                   throw new IllegalArgumentException("startOffset must be non-negative, and endOffset must be >= startOffset, "
-                      + "startOffset=" + startOffset + ",endOffset=" + endOffset);
+                      + "startOffset=" + startOffset + ",endOffset=" + endOffset + " for field '" + field.name() + "'");
                 }
                 if (startOffset < lastStartOffset) {
                   throw new IllegalArgumentException("offsets must not go backwards startOffset=" 
-                       + startOffset + " is < lastStartOffset=" + lastStartOffset);
+                       + startOffset + " is < lastStartOffset=" + lastStartOffset + " for field '" + field.name() + "'");
                 }
                 lastStartOffset = startOffset;
               }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index d1795b9..99c2f1d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -421,7 +421,7 @@
         // create new fields update, which should affect previous docs in the
         // current segment
         FieldsUpdate fieldsUpdate = new FieldsUpdate(term, operation, fields, 
-            analyzer, numDocsInRAM.get() - 1);
+            analyzer, numDocsInRAM.get() - 1, System.currentTimeMillis());
         // invert the given fields and store in RAMDirectory
         dwpt.invertFieldsUpdate(fieldsUpdate, globalFieldNumberMap);
         dwpt.updateFields(term, fieldsUpdate);
@@ -480,6 +480,9 @@
         // abortable so that IW.close(false) is able to stop it
         TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
         
+        updates.startWriting(infoPerCommit.getNextUpdateGen(),
+            infoPerCommit.info.getDocCount());
+
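+        // the update readers pulled below rely on startWriting above having
+        // initialized the iteration state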
         final List<AtomicReader> mergeReaders = new ArrayList<AtomicReader>();
         AtomicReader reader;
         while ((reader = updates.nextReader()) != null) { // add new indexes
@@ -489,10 +492,6 @@
         SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir,
             interval, MergeState.CheckAbort.NONE, globalFieldNumberMap, context);
         
-        updates.startWriting(infoPerCommit.getNextUpdateGen(),
-            infoPerCommit.info.getDocCount(), indexWriter.getConfig()
-                .getReaderTermsIndexDivisor());
-
         Set<String> generationReplacementFilenames = null;
         boolean success = false;
         try {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index ef399ce..5fb8af1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -132,13 +132,11 @@
     final FieldInfos fieldInfos;
     final FrozenBufferedDeletes segmentDeletes;
     final MutableBits liveDocs;
-    final UpdatedSegmentData liveUpdates;
     final int delCount;
 
     private FlushedSegment(SegmentInfoPerCommit segmentInfo, FieldInfos fieldInfos,
                            BufferedDeletes segmentDeletes, MutableBits liveDocs, 
-                           int delCount, BufferedUpdates segmentUpdates,
-                           UpdatedSegmentData liveUpdates) {
+                           int delCount, BufferedUpdates segmentUpdates) {
       this.segmentInfo = segmentInfo;
       this.fieldInfos = fieldInfos;
       if ((segmentDeletes != null && segmentDeletes.any())
@@ -149,7 +147,6 @@
         this.segmentDeletes = null;
       }
       this.liveDocs = liveDocs;
-      this.liveUpdates = liveUpdates;
       this.delCount = delCount;
     }
   }
@@ -618,9 +615,9 @@
 
       assert segmentInfo != null;
 
-      FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit, flushState.fieldInfos,
-                                             segmentDeletes, flushState.liveDocs, flushState.delCountOnFlush, 
-                                             pendingUpdates, flushState.liveUpdates);
+      FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit,
+          flushState.fieldInfos, segmentDeletes, flushState.liveDocs,
+          flushState.delCountOnFlush, pendingUpdates);
       sealFlushedSegment(fs);
       doAfterFlush();
       success = true;
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java b/lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java
index 5952e88..87531dd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java
@@ -46,7 +46,8 @@
   final Operation operation;
   final Set<String> replacedFields;
   final Analyzer analyzer;
-  final int docIDUpto;
+  final int docIdUpto;
+  final long timeStamp;
 
   IndexDocument fields;
   Directory directory;
@@ -67,7 +68,7 @@
    *          Document ID of the last document added before this field update
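+   * @param timeStamp
+   *          creation time of this update in milliseconds, used to order
+   *          updates that share the same docIDUpto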
    */
   public FieldsUpdate(Term term, Operation operation, IndexDocument fields,
-      Analyzer analyzer, int docIDUpto) {
+      Analyzer analyzer, int docIDUpto, long timeStamp) {
     this.term = term;
     this.fields = fields;
     this.operation = operation;
@@ -83,7 +84,8 @@
       }
     }
     this.analyzer = analyzer;
-    this.docIDUpto = docIDUpto;
+    this.docIdUpto = docIDUpto;
+    this.timeStamp = timeStamp;
   }
   
   /**
@@ -97,7 +99,8 @@
     this.operation = other.operation;
     this.replacedFields = other.replacedFields;
     this.analyzer = other.analyzer;
-    this.docIDUpto = other.docIDUpto;
+    this.docIdUpto = other.docIdUpto;
+    this.timeStamp = other.timeStamp;
     this.directory = other.directory;
     this.segmentInfo = other.segmentInfo;
   }
@@ -105,7 +108,15 @@
-  /* Order FrieldsUpdate by increasing docIDUpto */
+  /* Order FieldsUpdate by increasing docIdUpto */
   @Override
   public int compareTo(FieldsUpdate other) {
-    return this.docIDUpto - other.docIDUpto;
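+    // ties on docIdUpto are broken by creation time so updates arriving
+    // later sort after earlier ones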
+    int diff = this.docIdUpto - other.docIdUpto;
+    if (diff == 0) {
+      if (this.timeStamp < other.timeStamp) {
+        return -1;
+      } else if (this.timeStamp > other.timeStamp) {
+        return 1;
+      }
+    }
+    return diff;
   }
   
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
index ab1bc60..8c4aff3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
@@ -19,9 +19,7 @@
 
 import java.io.IOException;
 import java.util.Comparator;
-import java.util.Iterator;
 import java.util.Map;
-import java.util.SortedSet;
 
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -367,13 +365,6 @@
       segDeletes = null;
     }
     
-    final Map<Term,SortedSet<FieldsUpdate>> segUpdates;
-    if (state.segUpdates != null && state.segUpdates.terms.size() > 0) {
-      segUpdates = state.segUpdates.terms;
-    } else {
-      segUpdates = null;
-    }
-    
     final int[] termIDs = termsHashPerField.sortPostings(termComp);
     final int numTerms = termsHashPerField.bytesHash.size();
     final BytesRef text = new BytesRef();
@@ -406,8 +397,6 @@
 
       final PostingsConsumer postingsConsumer = termsConsumer.startTerm(text);
 
-      Term term = new Term(fieldName, text);
-      
       final int delDocLimit;
       if (segDeletes != null) {
         protoTerm.bytes = text;
@@ -421,19 +410,6 @@
         delDocLimit = 0;
       }
 
-      final SortedSet<FieldsUpdate> termUpdates;
-      Iterator<FieldsUpdate> updatesIterator = null;
-      FieldsUpdate nextUpdate = null;
-      if (segUpdates != null) {
-        termUpdates = segUpdates.get(term);
-        if (termUpdates != null && !termUpdates.isEmpty()) {
-          updatesIterator = termUpdates.iterator();
-          nextUpdate = updatesIterator.next();
-        }
-      } else {
-        termUpdates = null;
-      }
-
       // Now termStates has numToMerge FieldMergeStates
       // which all share the same term.  Now we must
       // interleave the docID streams.
@@ -506,23 +482,6 @@
           }
         }
 
-        // make sure we update the relevant documents according to the doc ID
-        // in which the updates arrived
-        while (nextUpdate != null && docID > nextUpdate.docIDUpto) {
-          if (updatesIterator.hasNext()) {
-            nextUpdate = updatesIterator.next();
-          } else {
-            nextUpdate = null;
-          }
-        }
-        
-        if (nextUpdate != null) {
-            if (state.liveUpdates == null) {
-              state.liveUpdates = new UpdatedSegmentData();
-            }
-            state.liveUpdates.addUpdate(docID, nextUpdate, true);
-        }
-
         totTF += termFreq;
         
         // Carefully copy over the prox + payload info,
diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java
index 2f94d9a..450cae0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java
@@ -60,23 +60,36 @@
 
   public FrozenBufferedDeletes(BufferedDeletes deletes, BufferedUpdates updates, boolean isSegmentPrivate) {
     this.isSegmentPrivate = isSegmentPrivate;
-    assert !isSegmentPrivate || deletes.terms.size() == 0 : "segment private package should only have del queries"; 
-    Term termsArray[] = deletes.terms.keySet().toArray(new Term[deletes.terms.size()]);
-    termCount = termsArray.length;
-    ArrayUtil.timSort(termsArray);
-    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-    for (Term term : termsArray) {
-      builder.add(term);
-    }
-    terms = builder.finish();
-    
-    queries = new Query[deletes.queries.size()];
-    queryLimits = new int[deletes.queries.size()];
-    int upto = 0;
-    for(Map.Entry<Query,Integer> ent : deletes.queries.entrySet()) {
-      queries[upto] = ent.getKey();
-      queryLimits[upto] = ent.getValue();
-      upto++;
+    int localBytesUsed = 0;
+    if (deletes != null) {
+      assert !isSegmentPrivate || deletes.terms.size() == 0 : "segment private packet should only have del queries";
+      Term termsArray[] = deletes.terms.keySet().toArray(
+          new Term[deletes.terms.size()]);
+      termCount = termsArray.length;
+      ArrayUtil.timSort(termsArray);
+      PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
+      for (Term term : termsArray) {
+        builder.add(term);
+      }
+      terms = builder.finish();
+      localBytesUsed += (int) terms.getSizeInBytes();
+      
+      queries = new Query[deletes.queries.size()];
+      queryLimits = new int[deletes.queries.size()];
+      int upto = 0;
+      for (Map.Entry<Query,Integer> ent : deletes.queries.entrySet()) {
+        queries[upto] = ent.getKey();
+        queryLimits[upto] = ent.getValue();
+        upto++;
+      }
+      
+      localBytesUsed += queries.length * BYTES_PER_DEL_QUERY;
+      numTermDeletes = deletes.numTermDeletes.get();
+    } else { 
+      terms = null;
+      numTermDeletes = 0;
+      queries = null;
+      queryLimits = null;
     }
     
     // freeze updates
@@ -87,10 +100,10 @@
       for (SortedSet<FieldsUpdate> list : updates.terms.values()) {
         allUpdates.addAll(list);
       }
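+      // coarse fixed estimate for the frozen updates bookkeeping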
+      localBytesUsed += 100;
     }
     
-    bytesUsed = (int) terms.getSizeInBytes() + queries.length * BYTES_PER_DEL_QUERY + 100 /* updates */;
-    numTermDeletes = deletes.numTermDeletes.get();
+    bytesUsed = localBytesUsed;
   }
   
   public void setDelGen(long gen) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java
index 5ec6c36..a67b51a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileNames.java
@@ -238,11 +238,15 @@
     return filename;
   }  
 
-  // All files created by codecs much match this pattern (we
-  // check this in SegmentInfo.java):
-  static final Pattern CODEC_FILE_PATTERN = Pattern.compile("_[_]?[a-z0-9]+(_.*)?\\..*");
+  /**
+   * All files created by codecs must match this pattern (checked in
+   * SegmentInfo).
+   */
+  public static final Pattern CODEC_FILE_PATTERN = Pattern.compile("_[a-z0-9]+(_.*)?\\..*");
 
+  /** Returns true if the file denotes an updated segment. */
   public static boolean isUpdatedSegmentFile(String file) {
     return file.startsWith("__");
   }
+  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 14975fa..92ef81a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -384,7 +384,7 @@
           }
           success = true;
           // Prevent segmentInfos from changing while opening the
-          // reader; in theory we could do similar retry logic,
+          // reader; in theory we could instead do similar retry logic,
           // just like we do when loading segments_N
           synchronized(this) {
             maybeApplyDeletes(applyAllDeletes);
@@ -452,6 +452,16 @@
       }
     }
     
+    public synchronized boolean anyPendingDeletes() {
+      for(ReadersAndLiveDocs rld : readerMap.values()) {
+        if (rld.getPendingDeleteCount() != 0) {
+          return true;
+        }
+      }
+
+      return false;
+    }
+
     public synchronized void release(ReadersAndLiveDocs rld) throws IOException {
       
       // Matches incRef in get:
@@ -575,8 +585,21 @@
         rld.incRef();
       }
       
+      assert noDups();
+
       return rld;
     }
+
+    // Make sure that every segment appears only once in the
+    // pool:
+    private boolean noDups() {
+      Set<String> seen = new HashSet<String>();
+      for(SegmentInfoPerCommit info : readerMap.keySet()) {
+        assert !seen.contains(info.info.name);
+        seen.add(info.info.name);
+      }
+      return true;
+    }
   }
   
   /**
@@ -699,8 +722,7 @@
         
         // Record that we have a change (zero out all
         // segments) pending:
-        changeCount++;
-        segmentInfos.changed();
+        changed();
       } else {
         segmentInfos.read(directory);
         
@@ -716,8 +738,7 @@
           SegmentInfos oldInfos = new SegmentInfos();
           oldInfos.read(directory, commit.getSegmentsFileName());
           segmentInfos.replace(oldInfos);
-          changeCount++;
-          segmentInfos.changed();
+          changed();
           if (infoStream.isEnabled("IW")) {
             infoStream.message("IW",
                 "init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
@@ -746,8 +767,7 @@
         // We have to mark ourself as changed so that if we
         // are closed w/o any further changes we write a new
         // segments_N file.
-        changeCount++;
-        segmentInfos.changed();
+        changed();
       }
       
       if (infoStream.isEnabled("IW")) {
@@ -1118,6 +1138,9 @@
     if (docWriter.anyDeletions()) {
       return true;
     }
+    if (readerPool.anyPendingDeletes()) {
+      return true;
+    }
     for (final SegmentInfoPerCommit info : segmentInfos) {
       if (info.hasDeletions()) {
         return true;
@@ -1524,7 +1547,7 @@
             
             // Must bump changeCount so if no other changes
             // happened, we still commit this change:
-            changeCount++;
+            changed();
           }
           // System.out.println("  yes " + info.info.name + " " + docID);
           return true;
@@ -2411,9 +2434,14 @@
    * referenced exist (correctly) in the index directory.
    */
   synchronized void checkpoint() throws IOException {
+    changed();
+    deleter.checkpoint(segmentInfos, false);
+  }
+
+  /** Called internally if any index state has changed. */
+  synchronized void changed() {
     changeCount++;
     segmentInfos.changed();
-    deleter.checkpoint(segmentInfos, false);
   }
   
   void writeSegmentUpdates(SegmentInfoPerCommit info,
@@ -4513,7 +4541,6 @@
               + (infos.version == segmentInfos.version) + " DW changes: "
               + docWriter.anyChanges() + " BD changes: "
               + bufferedDeletesStream.any());
-      
     }
     return infos.version == segmentInfos.version && !docWriter.anyChanges()
         && !bufferedDeletesStream.any();
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
index 666fb87..18055f0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
@@ -242,6 +242,39 @@
     }
   }
 
+  /**
+   * A utility for writing the {@link IndexFileNames#SEGMENTS_GEN} file to a
+   * {@link Directory}.
+   * 
+   * <p>
+   * <b>NOTE:</b> this is an internal utility which is kept public so that it's
+   * accessible by code from other packages. You should avoid calling this
+   * method unless you're absolutely sure what you're doing!
+   * 
+   * @lucene.internal
+   */
+  public static void writeSegmentsGen(Directory dir, long generation) {
+    try {
+      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN, IOContext.READONCE);
+      try {
+        genOutput.writeInt(FORMAT_SEGMENTS_GEN_CURRENT);
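+        // the generation is intentionally written twice; a reader only
+        // trusts the value when both copies match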
+        genOutput.writeLong(generation);
+        genOutput.writeLong(generation);
+      } finally {
+        genOutput.close();
+        dir.sync(Collections.singleton(IndexFileNames.SEGMENTS_GEN));
+      }
+    } catch (Throwable t) {
+      // It's OK if we fail to write this file since it's
+      // used only as one of the retry fallbacks.
+      try {
+        dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+      } catch (Throwable t2) {
+        // Ignore; this file is only used in a retry
+        // fallback on init.
+      }
+    }
+  }
 
   /**
    * Get the next segments_N filename that will be written.
@@ -850,27 +883,7 @@
     }
 
     lastGeneration = generation;
-
-    try {
-      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN, IOContext.READONCE);
-      try {
-        genOutput.writeInt(FORMAT_SEGMENTS_GEN_CURRENT);
-        genOutput.writeLong(generation);
-        genOutput.writeLong(generation);
-      } finally {
-        genOutput.close();
-        dir.sync(Collections.singleton(IndexFileNames.SEGMENTS_GEN));
-      }
-    } catch (Throwable t) {
-      // It's OK if we fail to write this file since it's
-      // used only as one of the retry fallbacks.
-      try {
-        dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
-      } catch (Throwable t2) {
-        // Ignore; this file is only used in a retry
-        // fallback on init.
-      }
-    }
+    writeSegmentsGen(dir, generation);
   }
 
   /** Writes & syncs to the Directory dir, taking care to
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java b/lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java
index 22c3c54..1d1661d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java
@@ -67,9 +67,6 @@
    *  only set if there is one or more deleted documents. */
   public MutableBits liveDocs;
 
-  // Lazily created:
-  public UpdatedSegmentData liveUpdates;
-  
   /** Unique suffix for any postings files written for this
    *  segment.  {@link PerFieldPostingsFormat} sets this for
    *  each of the postings formats it wraps.  If you create
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 452e702..9425930 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -89,6 +89,10 @@
       IOException prior = null;
       boolean success = false;
       try {
+        // NOTE: important that we use infos not
+        // segmentInfos here, so that we are passing the
+        // actual instance of SegmentInfoPerCommit in
+        // IndexWriter's segmentInfos:
         final SegmentInfoPerCommit info = infos.info(i);
         assert info.info.dir == dir;
         final ReadersAndLiveDocs rld = writer.readerPool.get(info, true);
@@ -118,9 +122,10 @@
       writer, segmentInfos, writer.getConfig().getReaderTermsIndexDivisor(), applyAllDeletes);
   }
 
-  /** This constructor is only used for {@link #doOpenIfChanged(SegmentInfos, IndexWriter)} */
-  private static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, List<? extends AtomicReader> oldReaders,
+  /** This constructor is only used for {@link #doOpenIfChanged(SegmentInfos)} */
+  private static DirectoryReader open(Directory directory, SegmentInfos infos, List<? extends AtomicReader> oldReaders,
     int termInfosIndexDivisor) throws IOException {
+
     // we put the old SegmentReaders in a map, that allows us
     // to lookup a reader using its segment name
     final Map<String,Integer> segmentReaders = new HashMap<String,Integer>();
@@ -208,7 +213,7 @@
         }
       }
     }    
-    return new StandardDirectoryReader(directory, newReaders, writer, infos, termInfosIndexDivisor, false);
+    return new StandardDirectoryReader(directory, newReaders, null, infos, termInfosIndexDivisor, false);
   }
 
   @Override
@@ -233,7 +238,7 @@
 
   @Override
   protected DirectoryReader doOpenIfChanged() throws IOException {
-    return doOpenIfChanged(null);
+    return doOpenIfChanged((IndexCommit) null);
   }
 
   @Override
@@ -303,13 +308,13 @@
       protected Object doBody(String segmentFileName) throws IOException {
         final SegmentInfos infos = new SegmentInfos();
         infos.read(directory, segmentFileName);
-        return doOpenIfChanged(infos, null);
+        return doOpenIfChanged(infos);
       }
     }.run(commit);
   }
 
-  DirectoryReader doOpenIfChanged(SegmentInfos infos, IndexWriter writer) throws IOException {
-    return StandardDirectoryReader.open(directory, writer, infos, getSequentialSubReaders(), termInfosIndexDivisor);
+  DirectoryReader doOpenIfChanged(SegmentInfos infos) throws IOException {
+    return StandardDirectoryReader.open(directory, infos, getSequentialSubReaders(), termInfosIndexDivisor);
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java b/lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java
index fa8406e..6c990a3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java
+++ b/lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java
@@ -1,14 +1,21 @@
 package org.apache.lucene.index;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.PriorityQueue;
+import java.util.Set;
+import java.util.SortedSet;
 import java.util.TreeMap;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.FieldsUpdate.Operation;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Bits;
@@ -36,44 +43,96 @@
 class UpdatedSegmentData {
   
   static final FieldInfos EMPTY_FIELD_INFOS = new FieldInfos(new FieldInfo[0]);
-
+  
-  /** Updates mapped by doc ID, for each do sorted list of updates. */
+  /** Updates mapped by doc ID; for each doc, a sorted map of updates. */
-  private TreeMap<Integer,PriorityQueue<FieldsUpdate>> updatesMap;
+  private TreeMap<Integer,TreeMap<FieldsUpdate, Set<String>>> docIdToUpdatesMap;
+  private HashMap<FieldsUpdate, List<Integer>> updatesToDocIdMap;
+  private LinkedHashMap<FieldsUpdate,UpdateAtomicReader> allApplied;
   
-  /** */
   private long generation;
+  private boolean exactSegment;
   
-  private Map<String,FieldGenerationReplacements> fieldGenerationReplacments = new HashMap<String,FieldGenerationReplacements>();
+  private Map<String,FieldGenerationReplacements> fieldGenerationReplacments;
   
-  private Iterator<Entry<Integer,PriorityQueue<FieldsUpdate>>> updatesIterator;
+  private Iterator<Entry<Integer,TreeMap<FieldsUpdate,Set<String>>>> updatesIterator;
   private int currDocID;
   private int nextDocID;
   private int numDocs;
-  private PriorityQueue<FieldsUpdate> nextUpdate;
+  private TreeMap<FieldsUpdate,Set<String>> nextUpdate;
   private Analyzer analyzer;
   
-  private int termsIndexDivisor;
-  
-  UpdatedSegmentData() {
-    updatesMap = new TreeMap<Integer,PriorityQueue<FieldsUpdate>>();
+  UpdatedSegmentData(SegmentReader reader,
+      SortedSet<FieldsUpdate> packetUpdates, boolean exactSegment)
+      throws IOException {
+    docIdToUpdatesMap = new TreeMap<>();
+    updatesToDocIdMap = new HashMap<>();
+    this.exactSegment = exactSegment;
+    
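+    // readers over updates already processed from this packet, kept in
+    // insertion order so later updates can be matched against them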
+    allApplied = new LinkedHashMap<>();
+    
+    for (FieldsUpdate update : packetUpdates) {
+      // add updates according to the base reader
+      DocsEnum docsEnum = reader.termDocsEnum(update.term);
+      if (docsEnum != null) {
+        int docId;
+        while ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+          addUpdate(docId, update);
+        }
+      }
+      
+      // try applying on previous updates in this packet
+      for (Entry<FieldsUpdate,UpdateAtomicReader> applied : allApplied
+          .entrySet()) {
+        if (applied.getValue().hasTerm(update.term)) {
+          List<Integer> list = updatesToDocIdMap.get(applied.getKey());
+          if (list != null) {
+            for (Integer docId : list) {
+              Set<String> ignoredFields = docIdToUpdatesMap.get(docId).get(
+                  applied.getKey());
+              if (ignoredFields == null
+                  || !ignoredFields.contains(update.term.field())) {
+                addUpdate(docId, update);
+              }
+            }
+          }
+        }
+      }
+      
+      allApplied.put(update, new UpdateAtomicReader(update.directory,
+          update.segmentInfo, IOContext.DEFAULT));
+    }
+    
   }
   
-  void addUpdate(int docId, FieldsUpdate fieldsUpdate, boolean checkDocId) {
-    if (checkDocId && docId > fieldsUpdate.docIDUpto) {
+  private void addUpdate(int docId, FieldsUpdate fieldsUpdate) {
+    if (exactSegment && docId > fieldsUpdate.docIdUpto) {
       return;
     }
-    PriorityQueue<FieldsUpdate> prevUpdates = updatesMap.get(docId);
+    TreeMap<FieldsUpdate,Set<String>> prevUpdates = docIdToUpdatesMap.get(docId);
     if (prevUpdates == null) {
-      prevUpdates = new PriorityQueue<FieldsUpdate>();
-      updatesMap.put(docId, prevUpdates);
-    } else {
-      System.out.println();
+      prevUpdates = new TreeMap<>();
+      docIdToUpdatesMap.put(docId, prevUpdates);
+    } else if (fieldsUpdate.operation == Operation.REPLACE_FIELDS) {
+      // set ignored fields in previous updates
+      for (Entry<FieldsUpdate,Set<String>> addIgnore : prevUpdates.entrySet()) {
+        if (addIgnore.getValue() == null) {
+          prevUpdates.put(addIgnore.getKey(), new HashSet<>(fieldsUpdate.replacedFields));
+        } else {
+          addIgnore.getValue().addAll(fieldsUpdate.replacedFields);
+        }
+      }
     }
-    prevUpdates.add(fieldsUpdate);
+    prevUpdates.put(fieldsUpdate, null);
+    List<Integer> prevDocIds = updatesToDocIdMap.get(fieldsUpdate);
+    if (prevDocIds == null) {
+      prevDocIds = new ArrayList<Integer>();
+      updatesToDocIdMap.put(fieldsUpdate, prevDocIds);
+    }
+    prevDocIds.add(docId);
   }
   
   boolean hasUpdates() {
-    return !updatesMap.isEmpty();
+    return !docIdToUpdatesMap.isEmpty();
   }
   
   /**
@@ -83,16 +142,13 @@
    *          The updates generation.
    * @param numDocs
    *          number of documents in the base segment
-   * @param termsIndexDivisor
-   *          Terms index divisor to use in temporary segments
    */
-  void startWriting(long generation, int numDocs, int termsIndexDivisor) {
+  void startWriting(long generation, int numDocs) {
     this.generation = generation;
     this.numDocs = numDocs;
-    this.termsIndexDivisor = termsIndexDivisor;
-    updatesIterator = updatesMap.entrySet().iterator();
+    
+    updatesIterator = docIdToUpdatesMap.entrySet().iterator();
     currDocID = 0;
-    fieldGenerationReplacments.clear();
     // fetch the first actual updates document if exists
     nextDocUpdate();
   }
@@ -102,8 +158,7 @@
    */
   private void nextDocUpdate() {
     if (updatesIterator.hasNext()) {
-      Entry<Integer,PriorityQueue<FieldsUpdate>> docUpdates = updatesIterator
-          .next();
+      Entry<Integer,TreeMap<FieldsUpdate,Set<String>>> docUpdates = updatesIterator.next();
       nextDocID = docUpdates.getKey();
       nextUpdate = docUpdates.getValue();
     } else {
@@ -128,9 +183,9 @@
       currDocID = nextDocID;
     } else if (currDocID < numDocs) {
       // get an actual updates reader...
-      FieldsUpdate update = nextUpdate.poll();
-      toReturn = new UpdateAtomicReader(update.directory, update.segmentInfo,
-          IOContext.DEFAULT);
+      FieldsUpdate update = nextUpdate.firstEntry().getKey();
+      Set<String> ignore = nextUpdate.remove(update);
+      toReturn = allApplied.get(update);
       
       // ... and if done for this document remove from updates map
       if (nextUpdate.isEmpty()) {
@@ -139,6 +194,9 @@
       
       // add generation replacements if any exist
       if (update.replacedFields != null) {
+        if (fieldGenerationReplacments == null) {
+          fieldGenerationReplacments = new HashMap<String,FieldGenerationReplacements>();
+        }
         for (String fieldName : update.replacedFields) {
           FieldGenerationReplacements fieldReplacement = fieldGenerationReplacments
               .get(fieldName);
@@ -158,9 +216,9 @@
   }
   
   boolean isEmpty() {
-    return updatesMap.isEmpty();
+    return docIdToUpdatesMap.isEmpty();
   }
-
+  
   private class UpdateAtomicReader extends AtomicReader {
     
     final private SegmentCoreReaders core;
@@ -180,8 +238,7 @@
      */
     UpdateAtomicReader(Directory fieldsDir, SegmentInfo segmentInfo,
         IOContext context) throws IOException {
-      core = new SegmentCoreReaders(null, segmentInfo, -1, context,
-          termsIndexDivisor);
+      core = new SegmentCoreReaders(null, segmentInfo, -1, context, -1);
       numDocs = 1;
     }
     
@@ -193,6 +250,17 @@
       this.numDocs = numDocs;
     }
     
+    boolean hasTerm(Term term) throws IOException {
+      if (core == null) {
+        return false;
+      }
+      DocsEnum termDocsEnum = termDocsEnum(term);
+      if (termDocsEnum == null) {
+        return false;
+      }
+      return termDocsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
+    }
+
     @Override
     public Fields fields() throws IOException {
       if (core == null) {
@@ -247,8 +315,13 @@
     }
     
     @Override
-    protected void doClose() throws IOException {}
-
+    protected void doClose() throws IOException {
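+      // release the shared core readers so their underlying files get closed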
+      if (core == null) {
+        return;
+      }
+      core.decRef();
+    }
+    
     @Override
     public NumericDocValues getNumericDocValues(String field)
         throws IOException {
@@ -257,7 +330,7 @@
       }
       return core.getNumericDocValues(field);
     }
-
+    
     @Override
     public BinaryDocValues getBinaryDocValues(String field) throws IOException {
       if (core == null) {
@@ -265,7 +338,7 @@
       }
       return core.getBinaryDocValues(field);
     }
-
+    
     @Override
     public SortedDocValues getSortedDocValues(String field) throws IOException {
       if (core == null) {
@@ -273,7 +346,7 @@
       }
       return core.getSortedDocValues(field);
     }
-
+    
     @Override
     public SortedSetDocValues getSortedSetDocValues(String field)
         throws IOException {
@@ -282,7 +355,7 @@
       }
       return core.getSortedSetDocValues(field);
     }
-
+    
     @Override
     public NumericDocValues getNormValues(String field) throws IOException {
       if (core == null) {
@@ -290,6 +363,6 @@
       }
       return core.getNormValues(field);
     }
-    
   }
+  
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java b/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java
index c0a28ed..76ffae3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java
@@ -33,14 +33,14 @@
  *  the same time by two threads, because in this case you
  *  cannot in general know which thread "won". */
 
-public abstract class LiveFieldValues<T> implements ReferenceManager.RefreshListener, Closeable {
+public abstract class LiveFieldValues<S,T> implements ReferenceManager.RefreshListener, Closeable {
 
   private volatile Map<String,T> current = new ConcurrentHashMap<String,T>();
   private volatile Map<String,T> old = new ConcurrentHashMap<String,T>();
-  private final ReferenceManager<IndexSearcher> mgr;
+  private final ReferenceManager<S> mgr;
   private final T missingValue;
 
-  public LiveFieldValues(ReferenceManager<IndexSearcher> mgr, T missingValue) {
+  public LiveFieldValues(ReferenceManager<S> mgr, T missingValue) {
     this.missingValue = missingValue;
     this.mgr = mgr;
     mgr.addListener(this);
@@ -114,7 +114,7 @@
         // It either does not exist in the index, or, it was
         // already flushed & NRT reader was opened on the
         // segment, so fallback to current searcher:
-        IndexSearcher s = mgr.acquire();
+        S s = mgr.acquire();
         try {
           return lookupFromSearcher(s, id);
         } finally {
@@ -128,6 +128,6 @@
    *  in an NRT IndexSearcher.  You must implement this to
    *  go look up the value (eg, via doc values, field cache,
    *  stored fields, etc.). */
-  protected abstract T lookupFromSearcher(IndexSearcher s, String id) throws IOException;
+  protected abstract T lookupFromSearcher(S s, String id) throws IOException;
 }
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java b/lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java
index c2f5f80..e8fb76e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java
@@ -132,17 +132,12 @@
   }
   
   private static void addDocuments(Directory directory, Random localRandom,
-      int maxDocs, boolean randomConfig) throws IOException {
+      int maxDocs) throws IOException {
     init(localRandom);
     HashSet<Term> usedTerms = new HashSet<Term>();
     
-    final IndexWriterConfig config;
-    if (randomConfig) {
-      config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    } else {
-      config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    }
-    System.out.println(config.getMergePolicy());
+    final IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random()));
     config.setCodec(new SimpleTextCodec());
     IndexWriter writer = new IndexWriter(directory, config);
     
@@ -273,13 +268,13 @@
   }
   
   public void testRandomIndexGeneration() throws IOException {
-    addDocuments(dir, random(), Integer.MAX_VALUE, true);
+    addDocuments(dir, random(), Integer.MAX_VALUE);
     DirectoryReader directoryReader = DirectoryReader.open(dir);
     directoryReader.close();
   }
   
   public void testAddIndexes() throws IOException {
-    addDocuments(dir, random(), Integer.MAX_VALUE, true);
+    addDocuments(dir, random(), Integer.MAX_VALUE);
     RAMDirectory addedDir = new RAMDirectory();
     IndexWriter addedIndexWriter = new IndexWriter(addedDir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
@@ -300,13 +295,10 @@
   
   public void testIndexEquality() throws IOException {
     // create index through updates
-    addDocuments(dir, new Random(3), Integer.MAX_VALUE, true);
+    addDocuments(dir, new Random(3), Integer.MAX_VALUE);
     
     DirectoryReader updatesReader = DirectoryReader.open(dir);
     IndexData updatesIndexData = new IndexData(updatesReader);
-    System.out.println("Updates index data");
-    System.out.println(updatesIndexData.toString(false));
-    System.out.println();
     updatesReader.close();
     
     // create the same index directly
@@ -598,9 +590,6 @@
     DirectoryReader directReader = DirectoryReader.open(directDir);
     
     IndexData directIndexData = new IndexData(directReader);
-    System.out.println("Direct index data");
-    System.out.println(directIndexData.toString(false));
-    System.out.println();
     directReader.close();
     directDir.close();
     
@@ -783,6 +772,81 @@
     
   }
   
+  public void testReplaceLayers() throws IOException {
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random())));
+    
+    FieldType fieldType = new FieldType();
+    fieldType.setIndexed(true);
+    fieldType.setTokenized(false);
+    fieldType.setOmitNorms(true);
+    fieldType.setStored(true);
+    
+    Document doc0 = new Document();
+    doc0.add(new StoredField("f1", "a", fieldType));
+    writer.addDocument(doc0);
+
+    // add f2:b
+    Document fields1 = new Document();
+    fields1.add(new StoredField("f2", "b", fieldType));
+    writer.updateFields(Operation.ADD_FIELDS, new Term("f1", "a"), fields1);
+    
+    // remove f2:b and add f2:c
+    Document fields2 = new Document();
+    fields2.add(new StoredField("f2", "c", fieldType));
+    writer.updateFields(Operation.REPLACE_FIELDS, new Term("f2", "b"), fields2);
+    
+    // do nothing since f2:b was removed
+    Document fields3 = new Document();
+    fields3.add(new StoredField("f2", "d", fieldType));
+    writer.updateFields(Operation.ADD_FIELDS, new Term("f2", "b"), fields3);
+    
+    writer.close();
+    
+    DirectoryReader directoryReader = DirectoryReader.open(dir);
+    final AtomicReader atomicReader = directoryReader.leaves().get(0).reader();
+    printField(atomicReader, "f1");
+    
+    // check indexed fields
+    final DocsAndPositionsEnum termPositionsA = atomicReader
+        .termPositionsEnum(new Term("f1", "a"));
+    assertNotNull("no positions for term", termPositionsA);
+    assertEquals("wrong doc id", 0, termPositionsA.nextDoc());
+    assertEquals("wrong position", 0, termPositionsA.nextPosition());
+    assertEquals("wrong doc id", DocIdSetIterator.NO_MORE_DOCS,
+        termPositionsA.nextDoc());
+    
+    final DocsAndPositionsEnum termPositionsB = atomicReader
+        .termPositionsEnum(new Term("f2", "b"));
+    assertNotNull("no positions for term", termPositionsB);
+    assertEquals("wrong doc id", DocIdSetIterator.NO_MORE_DOCS,
+        termPositionsB.nextDoc());
+    
+    final DocsAndPositionsEnum termPositionsC = atomicReader
+        .termPositionsEnum(new Term("f2", "c"));
+    assertNotNull("no positions for term", termPositionsC);
+    assertEquals("wrong doc id", 0, termPositionsC.nextDoc());
+    assertEquals("wrong position", 100000, termPositionsC.nextPosition());
+    assertEquals("wrong doc id", DocIdSetIterator.NO_MORE_DOCS,
+        termPositionsC.nextDoc());
+    
+    final DocsAndPositionsEnum termPositionsD = atomicReader
+        .termPositionsEnum(new Term("f2", "d"));
+    assertNull("unexpected positions for term", termPositionsD);
+    
+    // check stored fields
+    final StoredDocument stored0 = atomicReader.document(0);
+    final StorableField[] f1_0 = stored0.getFields("f1");
+    assertEquals("wrong numeber of stored fields", 1, f1_0.length);
+    assertEquals("wrong field value", "a", f1_0[0].stringValue());
+    final StorableField[] f2_0 = stored0.getFields("f2");
+    assertEquals("wrong numeber of stored fields", 1, f2_0.length);
+    assertEquals("wrong field value", "c", f2_0[0].stringValue());
+    
+    directoryReader.close();
+    
+  }
+  
   private void printField(AtomicReader atomicReader, String fieldName)
       throws IOException {
     if (!VERBOSE_FIELD_REPLACEMENTS) {
@@ -807,8 +871,8 @@
     }
   }
   
-  public void testprintIndexes() throws IOException {
-    File outDir = new File("D:/temp/ifu/compare/scenario/a");
+  public void printIndexes() throws IOException {
+    File outDir = new File("D:/temp/ifu/compare/scenario/b");
     outDir.mkdirs();
     
     for (int i = 0; i < 42; i++) {
@@ -819,8 +883,7 @@
       for (String filename : directory.listAll()) {
         new File(fsDirFile, filename).delete();
       }
-      System.out.print("" + i + " ");
-      addDocuments(directory, new Random(3), i, true);
+      addDocuments(directory, new Random(3), i);
       DirectoryReader updatesReader = DirectoryReader.open(directory);
       IndexData updatesIndexData = new IndexData(updatesReader);
       updatesReader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
new file mode 100644
index 0000000..64fae63
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTryDelete.java
@@ -0,0 +1,201 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.search.SearcherFactory;
+import org.apache.lucene.search.SearcherManager;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+
+public class TestTryDelete extends LuceneTestCase
+{
+  private static IndexWriter getWriter (Directory directory)
+    throws IOException
+  {
+    LogMergePolicy policy = new LogByteSizeMergePolicy();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT,
+                                                   new MockAnalyzer(random()));
+    conf.setMergePolicy(policy);
+    conf.setOpenMode(OpenMode.CREATE_OR_APPEND);
+
+    IndexWriter writer = new IndexWriter(directory, conf);
+
+    return writer;
+  }
+
+  private static Directory createIndex ()
+    throws IOException
+  {
+    Directory directory = new RAMDirectory();
+
+    IndexWriter writer = getWriter(directory);
+
+    for (int i = 0; i < 10; i++) {
+      Document doc = new Document();
+      doc.add(new StringField("foo", String.valueOf(i), Store.YES));
+      writer.addDocument(doc);
+    }
+
+    writer.commit();
+    writer.close();
+
+    return directory;
+  }
+
+  public void testTryDeleteDocument ()
+    throws IOException
+  {
+    Directory directory = createIndex();
+
+    IndexWriter writer = getWriter(directory);
+
+    ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer,
+                                                              true,
+                                                              new SearcherFactory());
+
+    TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
+
+    IndexSearcher searcher = mgr.acquire();
+
+    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
+                                      100);
+    assertEquals(1, topDocs.totalHits);
+
+    long result;
+    if (random().nextBoolean()) {
+      IndexReader r = DirectoryReader.open(writer, true);
+      result = mgrWriter.tryDeleteDocument(r, 0);
+      r.close();
+    } else {
+      result = mgrWriter.tryDeleteDocument(searcher.getIndexReader(), 0);
+    }
+
+    // The tryDeleteDocument should have succeeded:
+    assertTrue(result != -1);
+
+    assertTrue(writer.hasDeletions());
+
+    if (random().nextBoolean()) {
+      writer.commit();
+    }
+
+    assertTrue(writer.hasDeletions());
+    
+    mgr.maybeRefresh();
+
+    searcher = mgr.acquire();
+
+    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+
+    assertEquals(0, topDocs.totalHits);
+  }
+
+  public void testTryDeleteDocumentCloseAndReopen ()
+    throws IOException
+  {
+    Directory directory = createIndex();
+
+    IndexWriter writer = getWriter(directory);
+
+    ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer,
+                                                              true,
+                                                              new SearcherFactory());
+
+    IndexSearcher searcher = mgr.acquire();
+
+    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
+                                      100);
+    assertEquals(1, topDocs.totalHits);
+
+    TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
+    long result = mgrWriter.tryDeleteDocument(DirectoryReader.open(writer,
+                                                                   true), 0);
+
+    assertEquals(1, result);
+
+    writer.commit();
+
+    assertTrue(writer.hasDeletions());
+
+    mgr.maybeRefresh();
+
+    searcher = mgr.acquire();
+
+    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+
+    assertEquals(0, topDocs.totalHits);
+
+    writer.close();
+
+    searcher = new IndexSearcher(DirectoryReader.open(directory));
+
+    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+
+    assertEquals(0, topDocs.totalHits);
+  }
+
+  public void testDeleteDocuments ()
+    throws IOException
+  {
+    Directory directory = createIndex();
+
+    IndexWriter writer = getWriter(directory);
+
+    ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer,
+                                                              true,
+                                                              new SearcherFactory());
+
+    IndexSearcher searcher = mgr.acquire();
+
+    TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")),
+                                      100);
+    assertEquals(1, topDocs.totalHits);
+
+    TrackingIndexWriter mgrWriter = new TrackingIndexWriter(writer);
+    long result = mgrWriter.deleteDocuments(new TermQuery(new Term("foo",
+                                                                   "0")));
+
+    assertEquals(1, result);
+
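+    // Intentionally no commit here: the NRT refresh below must still see the delete.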
+    // writer.commit();
+
+    assertTrue(writer.hasDeletions());
+
+    mgr.maybeRefresh();
+
+    searcher = mgr.acquire();
+
+    topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
+
+    assertEquals(0, topDocs.totalHits);
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
index cae3a73..055de01 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java
@@ -58,7 +58,7 @@
 
     final Integer missing = -1;
 
-    final LiveFieldValues<Integer> rt = new LiveFieldValues<Integer>(mgr, missing) {
+    final LiveFieldValues<IndexSearcher,Integer> rt = new LiveFieldValues<IndexSearcher,Integer>(mgr, missing) {
         @Override
         protected Integer lookupFromSearcher(IndexSearcher s, String id) throws IOException {
           TermQuery tq = new TermQuery(new Term("id", id));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
index 6f03439..29d05f1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -38,13 +38,6 @@
  *
  * @see TestPositionIncrement
  */ 
-/*
- * Remove ThreadLeaks and run with (Eclipse or command line):
- * -ea -Drt.seed=AFD1E7E84B35D2B1
- * to get leaked thread errors.
- */
-// @ThreadLeaks(linger = 1000, leakedThreadsBelongToSuite = true)
-@Seed("AFD1E7E84B35D2B1")
 public class TestPhraseQuery extends LuceneTestCase {
 
   /** threshold for comparing floats */
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestMaxFailuresRule.java b/lucene/core/src/test/org/apache/lucene/util/TestMaxFailuresRule.java
index 1884a7f..8dd4144 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestMaxFailuresRule.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestMaxFailuresRule.java
@@ -66,13 +66,10 @@
 
   @Test
   public void testMaxFailures() {
-    int maxFailures = LuceneTestCase.ignoreAfterMaxFailures.maxFailures;
-    int failuresSoFar = LuceneTestCase.ignoreAfterMaxFailures.failuresSoFar;
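+    // Swap in a rule that gives up after 2 failures; the previous rule is restored in the finally block: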
+    TestRuleIgnoreAfterMaxFailures newRule = new TestRuleIgnoreAfterMaxFailures(2);
+    TestRuleIgnoreAfterMaxFailures prevRule = LuceneTestCase.replaceMaxFailureRule(newRule);
     System.clearProperty(SysGlobals.SYSPROP_ITERATIONS());
     try {
-      LuceneTestCase.ignoreAfterMaxFailures.maxFailures = 2;
-      LuceneTestCase.ignoreAfterMaxFailures.failuresSoFar = 0;
-
       JUnitCore core = new JUnitCore();
       final StringBuilder results = new StringBuilder();
       core.addListener(new RunListener() {
@@ -110,8 +107,7 @@
           results.toString().matches("(S*F){2}A+"));
 
     } finally {
-      LuceneTestCase.ignoreAfterMaxFailures.maxFailures = maxFailures;
-      LuceneTestCase.ignoreAfterMaxFailures.failuresSoFar = failuresSoFar;
+      LuceneTestCase.replaceMaxFailureRule(prevRule);
     }
   }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index f06196a..fd7e8ac 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -593,7 +593,7 @@
   // TODO: can FST be used to index all internal substrings,
   // mapping to term?
 
-  // java -cp ../build/codecs/classes/java:../test-framework/lib/randomizedtesting-runner-2.0.9.jar:../build/core/classes/test:../build/core/classes/test-framework:../build/core/classes/java:../build/test-framework/classes/java:../test-framework/lib/junit-4.10.jar org.apache.lucene.util.fst.TestFSTs /xold/tmp/allTerms3.txt out
+  // java -cp ../build/codecs/classes/java:../test-framework/lib/randomizedtesting-runner-2.0.10.jar:../build/core/classes/test:../build/core/classes/test-framework:../build/core/classes/java:../build/test-framework/classes/java:../test-framework/lib/junit-4.10.jar org.apache.lucene.util.fst.TestFSTs /xold/tmp/allTerms3.txt out
   public static void main(String[] args) throws IOException {
     int prune = 0;
     int limit = Integer.MAX_VALUE;
diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java
index af3694a..beca552 100644
--- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java
+++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/WithNestedTests.java
@@ -23,11 +23,13 @@
 import java.util.List;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures;
 import org.apache.lucene.util.TestRuleIgnoreTestSuites;
 import org.apache.lucene.util.TestRuleMarkFailure;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
@@ -66,6 +68,22 @@
   private ByteArrayOutputStream sysout;
   private ByteArrayOutputStream syserr;
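+  // Nested suites fail on purpose, so lift the max-failures limit while they run and restore the previous rule afterwards.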
 
+  @ClassRule
+  public static final TestRule classRules = RuleChain.outerRule(new TestRuleAdapter() {
+    private TestRuleIgnoreAfterMaxFailures prevRule;
+
+    @Override
+    protected void before() throws Throwable {
+      TestRuleIgnoreAfterMaxFailures newRule = new TestRuleIgnoreAfterMaxFailures(Integer.MAX_VALUE);
+      prevRule = LuceneTestCase.replaceMaxFailureRule(newRule);
+    }
+
+    @Override
+    protected void afterAlways(List<Throwable> errors) throws Throwable {
+      if (prevRule != null) {
+        LuceneTestCase.replaceMaxFailureRule(prevRule);
+      }
+    }
+  });
+
   /**
    * Restore properties after test.
    */
@@ -86,7 +104,7 @@
       })
       .around(marker);
   }
-      
+
   @Before
   public final void before() {
     if (suppressOutputStreams) {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/associations/MultiAssociationsFacetsAggregator.java b/lucene/facet/src/java/org/apache/lucene/facet/associations/MultiAssociationsFacetsAggregator.java
index 5b3678c..3004ad6 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/associations/MultiAssociationsFacetsAggregator.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/associations/MultiAssociationsFacetsAggregator.java
@@ -48,7 +48,7 @@
    * Creates a new {@link MultiAssociationsFacetsAggregator} over the given
    * aggregators. The mapping is used by
    * {@link #rollupValues(FacetRequest, int, int[], int[], FacetArrays)} to
-   * rollup the values of the speicfic category by the corresponding
+   * rollup the values of the specific category by the corresponding
    * {@link FacetsAggregator}. However, since each {@link FacetsAggregator}
    * handles the associations of a specific type, which could cover multiple
    * categories, the aggregation is done on the unique set of aggregators, which
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
index f902b33..1ab3b73 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeAccumulator.java
@@ -119,7 +119,7 @@
   }
 
   @Override
-  protected boolean requiresDocScores() {
+  public boolean requiresDocScores() {
     return false;
   }
 }
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/RangeFacetsAccumulatorWrapper.java b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeFacetsAccumulatorWrapper.java
new file mode 100644
index 0000000..ef108f3
--- /dev/null
+++ b/lucene/facet/src/java/org/apache/lucene/facet/range/RangeFacetsAccumulatorWrapper.java
@@ -0,0 +1,117 @@
+package org.apache.lucene.facet.range;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.lucene.facet.params.CategoryListParams;
+import org.apache.lucene.facet.params.FacetSearchParams;
+import org.apache.lucene.facet.search.FacetArrays;
+import org.apache.lucene.facet.search.FacetRequest;
+import org.apache.lucene.facet.search.FacetResult;
+import org.apache.lucene.facet.search.FacetResultsHandler;
+import org.apache.lucene.facet.search.FacetsAccumulator;
+import org.apache.lucene.facet.search.FacetsAggregator;
+import org.apache.lucene.facet.search.FacetsCollector.MatchingDocs;
+import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.index.IndexReader;
+
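+// Illustrative usage sketch (not part of this patch; searcher and query are assumed
+// to exist). create() decides the concrete accumulator, so callers treat the result
+// as a plain FacetsAccumulator:
+//
+//   FacetsAccumulator fa = RangeFacetsAccumulatorWrapper.create(fsp, indexReader, taxoReader);
+//   FacetsCollector fc = FacetsCollector.create(fa);
+//   searcher.search(query, fc);
+//   List<FacetResult> results = fc.getFacetResults();
+//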
+/** Takes multiple facet requests and, if necessary, splits
+ *  them between the normal {@link FacetsAccumulator} and a
+ *  {@link RangeAccumulator}. */
+public class RangeFacetsAccumulatorWrapper extends FacetsAccumulator {
+  // TODO: somehow handle SortedSetDVAccumulator as
+  // well... but it's tricky because SSDV just uses an
+  // "ordinary" flat CountFacetRequest so we can't switch
+  // based on that.
+  private final FacetsAccumulator accumulator;
+  private final RangeAccumulator rangeAccumulator;
+
+  public static FacetsAccumulator create(FacetSearchParams fsp, IndexReader indexReader, TaxonomyReader taxoReader) {
+    return create(fsp, indexReader, taxoReader, new FacetArrays(taxoReader.getSize()));
+  }
+
+  public static FacetsAccumulator create(FacetSearchParams fsp, IndexReader indexReader, TaxonomyReader taxoReader, FacetArrays arrays) {
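+    // Split the requests: RangeFacetRequests are handled by a RangeAccumulator,
+    // everything else by the normal FacetsAccumulator: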
+    List<FacetRequest> rangeRequests = new ArrayList<FacetRequest>();
+    List<FacetRequest> nonRangeRequests = new ArrayList<FacetRequest>();
+    for(FacetRequest fr : fsp.facetRequests) {
+      if (fr instanceof RangeFacetRequest) {
+        rangeRequests.add(fr);
+      } else {
+        nonRangeRequests.add(fr);
+      }
+    }
+
+    if (rangeRequests.isEmpty()) {
+      return new FacetsAccumulator(fsp, indexReader, taxoReader, arrays);
+    } else if (nonRangeRequests.isEmpty()) {
+      return new RangeAccumulator(fsp, indexReader);
+    } else {
+      FacetsAccumulator accumulator = new FacetsAccumulator(new FacetSearchParams(fsp.indexingParams, nonRangeRequests), indexReader, taxoReader, arrays);
+      RangeAccumulator rangeAccumulator = new RangeAccumulator(new FacetSearchParams(fsp.indexingParams, rangeRequests), indexReader);
+      return new RangeFacetsAccumulatorWrapper(accumulator, rangeAccumulator, fsp);
+    }
+  }
+
+  private RangeFacetsAccumulatorWrapper(FacetsAccumulator accumulator, RangeAccumulator rangeAccumulator, FacetSearchParams fsp) {
+    super(fsp, accumulator.indexReader, accumulator.taxonomyReader);
+    this.accumulator = accumulator;
+    this.rangeAccumulator = rangeAccumulator;
+  }
+
+  @Override
+  public FacetsAggregator getAggregator() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  protected FacetResultsHandler createFacetResultsHandler(FacetRequest fr) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  protected Set<CategoryListParams> getCategoryLists() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean requiresDocScores() {
+    return accumulator.requiresDocScores();
+  }
+
+  @Override
+  public List<FacetResult> accumulate(List<MatchingDocs> matchingDocs) throws IOException {
+    List<FacetResult> results = accumulator.accumulate(matchingDocs);
+    List<FacetResult> rangeResults = rangeAccumulator.accumulate(matchingDocs);
+
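+    // Stitch the two result lists back together in the original request order: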
+    int aUpto = 0;
+    int raUpto = 0;
+    List<FacetResult> merged = new ArrayList<FacetResult>();
+    for(FacetRequest fr : searchParams.facetRequests) {
+      if (fr instanceof RangeFacetRequest) {
+        merged.add(rangeResults.get(raUpto++));
+      } else {
+        merged.add(results.get(aUpto++));
+      }
+    }
+
+    return merged;
+  }
+}
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillDownQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillDownQuery.java
index 5bb920e..f612ba1 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillDownQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillDownQuery.java
@@ -21,7 +21,6 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.regex.Pattern;
 
 import org.apache.lucene.facet.params.CategoryListParams;
 import org.apache.lucene.facet.params.FacetIndexingParams;
@@ -90,7 +89,7 @@
   }
 
   /** Used by DrillSideways */
-  DrillDownQuery(FacetIndexingParams fip, Query baseQuery, List<Query> clauses) {
+  DrillDownQuery(FacetIndexingParams fip, Query baseQuery, List<Query> clauses, Map<String,Integer> drillDownDims) {
     this.fip = fip;
     this.query = new BooleanQuery(true);
     if (baseQuery != null) {
@@ -98,21 +97,8 @@
     }
     for(Query clause : clauses) {
       query.add(clause, Occur.MUST);
-      drillDownDims.put(getDim(clause), drillDownDims.size());
     }
-  }
-
-  String getDim(Query clause) {
-    assert clause instanceof ConstantScoreQuery;
-    clause = ((ConstantScoreQuery) clause).getQuery();
-    assert clause instanceof TermQuery || clause instanceof BooleanQuery;
-    String term;
-    if (clause instanceof TermQuery) {
-      term = ((TermQuery) clause).getTerm().text();
-    } else {
-      term = ((TermQuery) ((BooleanQuery) clause).getClauses()[0].getQuery()).getTerm().text();
-    }
-    return term.split(Pattern.quote(Character.toString(fip.getFacetDelimChar())), 2)[0];
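+    // The dims are now supplied by the caller (DrillSideways) instead of being re-parsed from each clause: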
+    this.drillDownDims.putAll(drillDownDims);
   }
 
   /**
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
index 1ed4a29..a67d4e3 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSideways.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -94,6 +95,11 @@
     BooleanClause[] clauses = in.getBooleanQuery().getClauses();
     Map<String,Integer> drillDownDims = in.getDims();
 
+    String[] dimsByIndex = new String[drillDownDims.size()];
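+    // dimsByIndex[i] is the dim of the i'th drill-down clause: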
+    for(Map.Entry<String,Integer> ent : drillDownDims.entrySet()) {
+      dimsByIndex[ent.getValue()] = ent.getKey();
+    }
+
     int startClause;
     if (clauses.length == drillDownDims.size()) {
       startClause = 0;
@@ -107,13 +113,15 @@
     // baseQuery:
     List<Query> nonFacetClauses = new ArrayList<Query>();
     List<Query> facetClauses = new ArrayList<Query>();
+    Map<String,Integer> dimToIndex = new LinkedHashMap<String,Integer>();
     for(int i=startClause;i<clauses.length;i++) {
       Query q = clauses[i].getQuery();
-      String dim = in.getDim(q);
+      String dim = dimsByIndex[i-startClause];
       if (!facetDims.contains(dim)) {
         nonFacetClauses.add(q);
       } else {
         facetClauses.add(q);
+        dimToIndex.put(dim, dimToIndex.size());
       }
     }
 
@@ -127,7 +135,7 @@
         newBaseQuery.add(q, BooleanClause.Occur.MUST);
       }
 
-      return new DrillDownQuery(fsp.indexingParams, newBaseQuery, facetClauses);
+      return new DrillDownQuery(fsp.indexingParams, newBaseQuery, facetClauses, dimToIndex);
     } else {
       // No change:
       return in;
@@ -157,6 +165,20 @@
       return new DrillSidewaysResult(c.getFacetResults(), null);      
     }
 
+    List<FacetRequest> ddRequests = new ArrayList<FacetRequest>();
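+    // Requests on drill-down dims are counted by the sideways collectors;
+    // only the remaining requests run in the drill-down pass: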
+    for(FacetRequest fr : fsp.facetRequests) {
+      assert fr.categoryPath.length > 0;
+      if (!drillDownDims.containsKey(fr.categoryPath.components[0])) {
+        ddRequests.add(fr);
+      }
+    }
+    FacetSearchParams fsp2;
+    if (!ddRequests.isEmpty()) {
+      fsp2 = new FacetSearchParams(fsp.indexingParams, ddRequests);
+    } else {
+      fsp2 = null;
+    }
+
     BooleanQuery ddq = query.getBooleanQuery();
     BooleanClause[] clauses = ddq.getClauses();
 
@@ -173,7 +195,7 @@
       startClause = 1;
     }
 
-    FacetsCollector drillDownCollector = FacetsCollector.create(getDrillDownAccumulator(fsp));
+    FacetsCollector drillDownCollector = fsp2 == null ? null : FacetsCollector.create(getDrillDownAccumulator(fsp2));
 
     FacetsCollector[] drillSidewaysCollectors = new FacetsCollector[drillDownDims.size()];
 
@@ -225,6 +247,8 @@
               break;
             }
           }
+        } else {
+          useCollectorMethod = true;
         }
       }
     }
@@ -246,6 +270,7 @@
 
     List<FacetResult> mergedResults = new ArrayList<FacetResult>();
     int[] requestUpto = new int[drillDownDims.size()];
+    int ddUpto = 0;
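+    // ddUpto indexes into drillDownResults, which now holds only the non-drill-down requests: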
     for(int i=0;i<fsp.facetRequests.size();i++) {
       FacetRequest fr = fsp.facetRequests.get(i);
       assert fr.categoryPath.length > 0;
@@ -260,7 +285,7 @@
           //System.out.println("get DD results");
         }
         //System.out.println("add dd results " + i);
-        mergedResults.add(drillDownResults.get(i));
+        mergedResults.add(drillDownResults.get(ddUpto++));
       } else {
         // Drill sideways dim:
         int dim = dimIndex.intValue();
@@ -359,7 +384,7 @@
 
     subQuery.setMinimumNumberShouldMatch(minShouldMatch);
 
-    //System.out.println("EXE " + topQuery);
+    // System.out.println("EXE " + topQuery);
 
     // Collects against the passed-in
     // drillDown/SidewaysCollectors as a side effect:
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysCollector.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysCollector.java
index bfefca0..01dded7 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysCollector.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysCollector.java
@@ -82,7 +82,9 @@
       // drillDown collector:
       //System.out.println("  hit " + drillDownCollector);
       hitCollector.collect(doc);
-      drillDownCollector.collect(doc);
+      if (drillDownCollector != null) {
+        drillDownCollector.collect(doc);
+      }
 
       // Also collect across all drill-sideways counts so
       // we "merge in" drill-down counts for this
@@ -98,21 +100,28 @@
       }
 
     } else {
+      boolean found = false;
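+      // Near miss: this doc matched every drill-down dim except one; find
+      // that dim and count the doc sideways into it: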
       for(int i=0;i<subScorers.length;i++) {
         if (subScorers[i] == null) {
           // This segment did not have any docs with this
           // drill-down field & value:
-          continue;
+          drillSidewaysCollectors[i].collect(doc);
+          assert allMatchesFrom(i+1, doc);
+          found = true;
+          break;
         }
         int subDoc = subScorers[i].docID();
-        //System.out.println("  sub: " + subDoc);
+        //System.out.println("  i=" + i + " sub: " + subDoc);
         if (subDoc != doc) {
+          //System.out.println("  +ds[" + i + "]");
           assert subDoc > doc: "subDoc=" + subDoc + " doc=" + doc;
           drillSidewaysCollectors[i].collect(doc);
           assert allMatchesFrom(i+1, doc);
+          found = true;
           break;
         }
       }
+      assert found;
     }
   }
 
@@ -134,8 +143,11 @@
 
   @Override
   public void setNextReader(AtomicReaderContext leaf) throws IOException {
+    //System.out.println("DS.setNextReader reader=" + leaf.reader());
     hitCollector.setNextReader(leaf);
-    drillDownCollector.setNextReader(leaf);
+    if (drillDownCollector != null) {
+      drillDownCollector.setNextReader(leaf);
+    }
     for(Collector dsc : drillSidewaysCollectors) {
       dsc.setNextReader(leaf);
     }
@@ -166,7 +178,9 @@
     Arrays.fill(subScorers, null);
     findScorers(scorer);
     hitCollector.setScorer(scorer);
-    drillDownCollector.setScorer(scorer);
+    if (drillDownCollector != null) {
+      drillDownCollector.setScorer(scorer);
+    }
     for(Collector dsc : drillSidewaysCollectors) {
       dsc.setScorer(scorer);
     }
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
index cf74879..5db28f2 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/DrillSidewaysScorer.java
@@ -63,8 +63,10 @@
     //}
     //System.out.println("score r=" + context.reader());
     collector.setScorer(this);
-    drillDownCollector.setScorer(this);
-    drillDownCollector.setNextReader(context);
+    if (drillDownCollector != null) {
+      drillDownCollector.setScorer(this);
+      drillDownCollector.setNextReader(context);
+    }
     for(DocsEnumsAndFreq dim : dims) {
       dim.sidewaysCollector.setScorer(this);
       dim.sidewaysCollector.setNextReader(context);
@@ -393,7 +395,9 @@
     //}
 
     collector.collect(collectDocID);
-    drillDownCollector.collect(collectDocID);
+    if (drillDownCollector != null) {
+      drillDownCollector.collect(collectDocID);
+    }
 
     // TODO: we could "fix" faceting of the sideways counts
     // to do this "union" (of the drill down hits) in the
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/search/FacetsAccumulator.java b/lucene/facet/src/java/org/apache/lucene/facet/search/FacetsAccumulator.java
index 907cbd9..ce0dd33 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/search/FacetsAccumulator.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/search/FacetsAccumulator.java
@@ -196,7 +196,7 @@
     return res;
   }
 
-  protected boolean requiresDocScores() {
+  public boolean requiresDocScores() {
     return getAggregator().requiresDocScores();
   }
 }
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeAccumulator.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeAccumulator.java
index 0bfab90..48738c2 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeAccumulator.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeAccumulator.java
@@ -17,7 +17,10 @@
  * limitations under the License.
  */
 
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoubleDocValuesField;
@@ -29,18 +32,30 @@
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.facet.FacetTestCase;
 import org.apache.lucene.facet.FacetTestUtils;
+import org.apache.lucene.facet.index.FacetFields;
 import org.apache.lucene.facet.params.FacetIndexingParams;
 import org.apache.lucene.facet.params.FacetSearchParams;
+import org.apache.lucene.facet.search.CountFacetRequest;
 import org.apache.lucene.facet.search.DrillDownQuery;
+import org.apache.lucene.facet.search.DrillSideways.DrillSidewaysResult;
+import org.apache.lucene.facet.search.DrillSideways;
+import org.apache.lucene.facet.search.FacetRequest;
 import org.apache.lucene.facet.search.FacetResult;
 import org.apache.lucene.facet.search.FacetResultNode;
+import org.apache.lucene.facet.search.FacetsAccumulator;
 import org.apache.lucene.facet.search.FacetsCollector;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util._TestUtil;
 
 public class TestRangeAccumulator extends FacetTestCase {
@@ -81,6 +96,114 @@
     d.close();
   }
 
+  /** Tests a single request that mixes Range and non-Range
+   *  faceting, with DrillSideways. */
+  public void testMixedRangeAndNonRange() throws Exception {
+    Directory d = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), d);
+    Directory td = newDirectory();
+    DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(td, IndexWriterConfig.OpenMode.CREATE);
+    FacetFields ff = new FacetFields(tw);
+
+    for(long l=0;l<100;l++) {
+      Document doc = new Document();
+      // For computing range facet counts:
+      doc.add(new NumericDocValuesField("field", l));
+      // For drill down by numeric range:
+      doc.add(new LongField("field", l, Field.Store.NO));
+
+      CategoryPath cp;
+      if ((l&3) == 0) {
+        cp = new CategoryPath("dim", "a");
+      } else {
+        cp = new CategoryPath("dim", "b");
+      }
+      ff.addFields(doc, Collections.singletonList(cp));
+      w.addDocument(doc);
+    }
+
+    IndexReader r = w.getReader();
+    w.close();
+
+    final TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
+    tw.close();
+
+    IndexSearcher s = newSearcher(r);
+
+    final FacetSearchParams fsp = new FacetSearchParams(
+                                new CountFacetRequest(new CategoryPath("dim"), 2),
+                                new RangeFacetRequest<LongRange>("field",
+                                                      new LongRange("less than 10", 0L, true, 10L, false),
+                                                      new LongRange("less than or equal to 10", 0L, true, 10L, true),
+                                                      new LongRange("over 90", 90L, false, 100L, false),
+                                                      new LongRange("90 or above", 90L, true, 100L, false),
+                                                      new LongRange("over 1000", 1000L, false, Long.MAX_VALUE, false)));
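+    // Note: some ranges overlap, differing only in whether endpoints are inclusive.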
+
+    final Set<String> dimSeen = new HashSet<String>();
+
+    DrillSideways ds = new DrillSideways(s, tr) {
+        @Override
+        protected FacetsAccumulator getDrillDownAccumulator(FacetSearchParams fsp) {
+          checkSeen(fsp);
+          return RangeFacetsAccumulatorWrapper.create(fsp, searcher.getIndexReader(), tr);
+        }
+
+        @Override
+        protected FacetsAccumulator getDrillSidewaysAccumulator(String dim, FacetSearchParams fsp) {
+          checkSeen(fsp);
+          return RangeFacetsAccumulatorWrapper.create(fsp, searcher.getIndexReader(), tr);
+        }
+
+        private void checkSeen(FacetSearchParams fsp) {
+          // Each dim should show up only once, across
+          // both drillDown and drillSideways requests:
+          for(FacetRequest fr : fsp.facetRequests) {
+            String dim = fr.categoryPath.components[0];
+            assertFalse("dim " + dim + " already seen", dimSeen.contains(dim));
+            dimSeen.add(dim);
+          }
+        }
+
+        @Override
+        protected boolean scoreSubDocsAtOnce() {
+          return random().nextBoolean();
+        }
+      };
+
+    // First search, no drill downs:
+    DrillDownQuery ddq = new DrillDownQuery(FacetIndexingParams.DEFAULT, new MatchAllDocsQuery());
+    DrillSidewaysResult dsr = ds.search(null, ddq, 10, fsp);
+
+    assertEquals(100, dsr.hits.totalHits);
+    assertEquals(2, dsr.facetResults.size());
+    assertEquals("dim (0)\n  b (75)\n  a (25)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(0)));
+    assertEquals("field (0)\n  less than 10 (10)\n  less than or equal to 10 (11)\n  over 90 (9)\n  90 or above (10)\n  over 1000 (0)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(1)));
+
+    // Second search, drill down on dim=b:
+    ddq = new DrillDownQuery(FacetIndexingParams.DEFAULT, new MatchAllDocsQuery());
+    ddq.add(new CategoryPath("dim", "b"));
+    dimSeen.clear();
+    dsr = ds.search(null, ddq, 10, fsp);
+
+    assertEquals(75, dsr.hits.totalHits);
+    assertEquals(2, dsr.facetResults.size());
+    assertEquals("dim (0)\n  b (75)\n  a (25)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(0)));
+    assertEquals("field (0)\n  less than 10 (7)\n  less than or equal to 10 (8)\n  over 90 (7)\n  90 or above (8)\n  over 1000 (0)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(1)));
+
+    // Third search, drill down on "less than or equal to 10":
+    ddq = new DrillDownQuery(FacetIndexingParams.DEFAULT, new MatchAllDocsQuery());
+    ddq.add("field", NumericRangeQuery.newLongRange("field", 0L, 10L, true, true));
+    dimSeen.clear();
+    dsr = ds.search(null, ddq, 10, fsp);
+
+    assertEquals(11, dsr.hits.totalHits);
+    assertEquals(2, dsr.facetResults.size());
+    assertEquals("dim (0)\n  b (8)\n  a (3)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(0)));
+    assertEquals("field (0)\n  less than 10 (10)\n  less than or equal to 10 (11)\n  over 90 (9)\n  90 or above (10)\n  over 1000 (0)\n", FacetTestUtils.toSimpleString(dsr.facetResults.get(1)));
+
+    IOUtils.close(tr, td, r, d);
+  }
+
   public void testBasicDouble() throws Exception {
     Directory d = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
diff --git a/lucene/licenses/httpclient-4.2.3.jar.sha1 b/lucene/licenses/httpclient-4.2.3.jar.sha1
new file mode 100644
index 0000000..b737191
--- /dev/null
+++ b/lucene/licenses/httpclient-4.2.3.jar.sha1
@@ -0,0 +1 @@
+37ced84d839a02fb856255eca85f0a4be95aa634
diff --git a/lucene/licenses/httpclient-LICENSE-ASL.txt b/lucene/licenses/httpclient-LICENSE-ASL.txt
new file mode 100644
index 0000000..2c41ec8
--- /dev/null
+++ b/lucene/licenses/httpclient-LICENSE-ASL.txt
@@ -0,0 +1,182 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+   
+This project contains annotations derived from JCIP-ANNOTATIONS
+Copyright (c) 2005 Brian Goetz and Tim Peierls.
+See http://www.jcip.net and the Creative Commons Attribution License 
+(http://creativecommons.org/licenses/by/2.5)
+
diff --git a/lucene/licenses/httpclient-NOTICE.txt b/lucene/licenses/httpclient-NOTICE.txt
new file mode 100644
index 0000000..3a98f45
--- /dev/null
+++ b/lucene/licenses/httpclient-NOTICE.txt
@@ -0,0 +1,8 @@
+Apache HttpComponents Client
+Copyright 1999-2011 The Apache Software Foundation
+
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
+
+This project contains annotations derived from JCIP-ANNOTATIONS
+Copyright (c) 2005 Brian Goetz and Tim Peierls. See http://www.jcip.net
\ No newline at end of file
diff --git a/lucene/licenses/httpcore-4.2.2.jar.sha1 b/lucene/licenses/httpcore-4.2.2.jar.sha1
new file mode 100644
index 0000000..0bc6aaa
--- /dev/null
+++ b/lucene/licenses/httpcore-4.2.2.jar.sha1
@@ -0,0 +1 @@
+b76bee23cd3f3ee9b98bc7c2c14670e821ddbbfd
diff --git a/lucene/licenses/httpcore-LICENSE-ASL.txt b/lucene/licenses/httpcore-LICENSE-ASL.txt
new file mode 100644
index 0000000..2c41ec8
--- /dev/null
+++ b/lucene/licenses/httpcore-LICENSE-ASL.txt
@@ -0,0 +1,182 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+   
+This project contains annotations derived from JCIP-ANNOTATIONS
+Copyright (c) 2005 Brian Goetz and Tim Peierls.
+See http://www.jcip.net and the Creative Commons Attribution License 
+(http://creativecommons.org/licenses/by/2.5)
+
diff --git a/lucene/licenses/httpcore-NOTICE.txt b/lucene/licenses/httpcore-NOTICE.txt
new file mode 100644
index 0000000..3a98f45
--- /dev/null
+++ b/lucene/licenses/httpcore-NOTICE.txt
@@ -0,0 +1,8 @@
+Apache HttpComponents Client
+Copyright 1999-2011 The Apache Software Foundation
+
+This product includes software developed by
+The Apache Software Foundation (http://www.apache.org/).
+
+This project contains annotations derived from JCIP-ANNOTATIONS
+Copyright (c) 2005 Brian Goetz and Tim Peierls. See http://www.jcip.net
\ No newline at end of file
diff --git a/lucene/licenses/javax.servlet-api-LICENSE-CDDL.txt b/lucene/licenses/javax.servlet-api-LICENSE-CDDL.txt
new file mode 100644
index 0000000..a0ccc93
--- /dev/null
+++ b/lucene/licenses/javax.servlet-api-LICENSE-CDDL.txt
@@ -0,0 +1,263 @@
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
+
+1. Definitions.
+
+   1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications.
+
+   1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
+
+   1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
+
+   1.4. "Executable" means the Covered Software in any form other than Source Code.
+
+   1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License.
+
+   1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
+
+   1.7. "License" means this document.
+
+   1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
+
+   1.9. "Modifications" means the Source Code and Executable form of any of the following:
+
+        A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
+
+        B. Any new file that contains any part of the Original Software or previous Modification; or
+
+        C. Any new file that is contributed or otherwise made available under the terms of this License.
+
+   1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License.
+
+   1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
+
+   1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
+
+   1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
+
+2. License Grants.
+
+      2.1. The Initial Developer Grant.
+
+      Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
+
+         (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
+
+         (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
+
+        (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
+
+        (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
+
+    2.2. Contributor Grant.
+
+    Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
+
+        (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
+
+        (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
+
+        (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
+
+        (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
+
+3. Distribution Obligations.
+
+      3.1. Availability of Source Code.
+      Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
+
+      3.2. Modifications.
+      The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
+
+      3.3. Required Notices.
+      You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
+
+      3.4. Application of Additional Terms.
+      You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
+
+      3.5. Distribution of Executable Versions.
+      You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
+
+      3.6. Larger Works.
+      You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
+
+4. Versions of the License.
+
+      4.1. New Versions.
+      Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
+
+      4.2. Effect of New Versions.
+      You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
+
+      4.3. Modified Versions.
+      When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
+
+5. DISCLAIMER OF WARRANTY.
+
+   COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+6. TERMINATION.
+
+      6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
+
+      6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
+
+      6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+   UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+   The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
+
+9. MISCELLANEOUS.
+
+   This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
+
+10. RESPONSIBILITY FOR CLAIMS.
+
+   As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
+
+   NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)
+
+   The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.
+
+
+The GNU General Public License (GPL) Version 2, June 1991
+
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.
+
+Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:
+
+   a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.
+
+   b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.
+
+   c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:
+
+   a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
+
+   b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,
+
+   c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.
+
+If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.
+
+5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.
+
+6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.
+
+This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
+
+   One line to give the program's name and a brief idea of what it does.
+
+   Copyright (C) year name of author
+
+   This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when it starts in an interactive mode:
+
+   Gnomovision version 69, Copyright (C) year name of author
+   Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:
+
+   Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+   signature of Ty Coon, 1 April 1989
+   Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.
+
+
+"CLASSPATH" EXCEPTION TO THE GPL VERSION 2
+
+Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words
+
+"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code."
+
+Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination.
+
+As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version.
diff --git a/lucene/licenses/javax.servlet-api-NOTICE.txt b/lucene/licenses/javax.servlet-api-NOTICE.txt
new file mode 100644
index 0000000..6340ec9
--- /dev/null
+++ b/lucene/licenses/javax.servlet-api-NOTICE.txt
@@ -0,0 +1,2 @@
+Servlet-api.jar is under the CDDL license; the original source
+code for this can be found at http://www.eclipse.org/jetty/downloads.php
diff --git a/lucene/licenses/jcl-over-slf4j-1.6.6.jar.sha1 b/lucene/licenses/jcl-over-slf4j-1.6.6.jar.sha1
new file mode 100644
index 0000000..c2582b1
--- /dev/null
+++ b/lucene/licenses/jcl-over-slf4j-1.6.6.jar.sha1
@@ -0,0 +1 @@
+ec497945fdcaf7fd970ae9931b9bbfaf735d385e
diff --git a/lucene/licenses/jcl-over-slf4j-LICENSE-BSD_LIKE.txt b/lucene/licenses/jcl-over-slf4j-LICENSE-BSD_LIKE.txt
new file mode 100644
index 0000000..f5ecafa
--- /dev/null
+++ b/lucene/licenses/jcl-over-slf4j-LICENSE-BSD_LIKE.txt
@@ -0,0 +1,21 @@
+Copyright (c) 2004-2008 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free  of charge, to any person obtaining
+a  copy  of this  software  and  associated  documentation files  (the
+"Software"), to  deal in  the Software without  restriction, including
+without limitation  the rights to  use, copy, modify,  merge, publish,
+distribute,  sublicense, and/or sell  copies of  the Software,  and to
+permit persons to whom the Software  is furnished to do so, subject to
+the following conditions:
+
+The  above  copyright  notice  and  this permission  notice  shall  be
+included in all copies or substantial portions of the Software.
+
+THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
+EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
+MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lucene/licenses/jcl-over-slf4j-NOTICE.txt b/lucene/licenses/jcl-over-slf4j-NOTICE.txt
new file mode 100644
index 0000000..cf43894
--- /dev/null
+++ b/lucene/licenses/jcl-over-slf4j-NOTICE.txt
@@ -0,0 +1,25 @@
+=========================================================================
+==  SLF4J Notice -- http://www.slf4j.org/license.html                  ==
+=========================================================================
+
+Copyright (c) 2004-2008 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free  of charge, to any person obtaining
+a  copy  of this  software  and  associated  documentation files  (the
+"Software"), to  deal in  the Software without  restriction, including
+without limitation  the rights to  use, copy, modify,  merge, publish,
+distribute,  sublicense, and/or sell  copies of  the Software,  and to
+permit persons to whom the Software  is furnished to do so, subject to
+the following conditions:
+
+The  above  copyright  notice  and  this permission  notice  shall  be
+included in all copies or substantial portions of the Software.
+
+THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
+EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
+MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lucene/licenses/jetty-LICENSE-ASL.txt b/lucene/licenses/jetty-LICENSE-ASL.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/lucene/licenses/jetty-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/lucene/licenses/jetty-NOTICE.txt b/lucene/licenses/jetty-NOTICE.txt
new file mode 100644
index 0000000..e9461a8
--- /dev/null
+++ b/lucene/licenses/jetty-NOTICE.txt
@@ -0,0 +1,111 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
+<html><head>
+
+
+<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
+<title>Eclipse.org Software User Agreement</title>
+</head><body lang="EN-US" link="blue" vlink="purple">
+<h2>Eclipse Foundation Software User Agreement</h2>
+<p>March 17, 2005</p>
+
+<h3>Usage Of Content</h3>
+
+<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
+   (COLLECTIVELY "CONTENT").  USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
+   CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW.  BY USING THE CONTENT, YOU AGREE THAT YOUR USE
+   OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
+   NOTICES INDICATED OR REFERENCED BELOW.  IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
+   CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
+   
+<h3>Applicable Licenses</h3>   
+   
+<p>Unless otherwise indicated, all Content made available by the
+Eclipse Foundation is provided to you under the terms and conditions of
+the Eclipse Public License Version 1.0 ("EPL"). A copy of the EPL is
+provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
+   For purposes of the EPL, "Program" will mean the Content.</p>
+
+<p>Content includes, but is not limited to, source code, object code,
+documentation and other files maintained in the Eclipse.org CVS
+repository ("Repository") in CVS modules ("Modules") and made available
+as downloadable archives ("Downloads").</p>
+   
+<ul>
+	<li>Content may be structured and packaged into modules to
+facilitate delivering, extending, and upgrading the Content. Typical
+modules may include plug-ins ("Plug-ins"), plug-in fragments
+("Fragments"), and features ("Features").</li>
+	<li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java&#8482; ARchive) in a directory named "plugins".</li>
+	<li>A
+Feature is a bundle of one or more Plug-ins and/or Fragments and
+associated material. Each Feature may be packaged as a sub-directory in
+a directory named "features". Within a Feature, files named
+"feature.xml" may contain a list of the names and version numbers of
+the Plug-ins and/or Fragments associated with that Feature.</li>
+	<li>Features
+may also include other Features ("Included Features"). Within a
+Feature, files named "feature.xml" may contain a list of the names and
+version numbers of Included Features.</li>
+</ul>   
+ 
+<p>The terms and conditions governing Plug-ins and Fragments should be
+contained in files named "about.html" ("Abouts"). The terms and
+conditions governing Features and
+Included Features should be contained in files named "license.html"
+("Feature Licenses"). Abouts and Feature Licenses may be located in any
+directory of a Download or Module
+including, but not limited to the following locations:</p>
+
+<ul>
+	<li>The top-level (root) directory</li>
+	<li>Plug-in and Fragment directories</li>
+	<li>Inside Plug-ins and Fragments packaged as JARs</li>
+	<li>Sub-directories of the directory named "src" of certain Plug-ins</li>
+	<li>Feature directories</li>
+</ul>
+		
+<p>Note: if a Feature made available by the Eclipse Foundation is
+installed using the Eclipse Update Manager, you must agree to a license
+("Feature Update License") during the
+installation process. If the Feature contains Included Features, the
+Feature Update License should either provide you with the terms and
+conditions governing the Included Features or
+inform you where you can locate them. Feature Update Licenses may be
+found in the "license" property of files named "feature.properties"
+found within a Feature.
+Such Abouts, Feature Licenses, and Feature Update Licenses contain the
+terms and conditions (or references to such terms and conditions) that
+govern your use of the associated Content in
+that directory.</p>
+
+<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER
+TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND
+CONDITIONS. SOME OF THESE
+OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
+
+<ul>
+	<li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
+	<li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
+	<li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
+	<li>IBM Public License 1.0 (available at <a href="http://oss.software.ibm.com/developerworks/opensource/license10.html">http://oss.software.ibm.com/developerworks/opensource/license10.html</a>)</li>	
+	<li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
+	<li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
+</ul>
+
+<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND
+CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License,
+or Feature Update License is provided, please
+contact the Eclipse Foundation to determine what terms and conditions
+govern that particular Content.</p>
+
+<h3>Cryptography</h3>
+
+<p>Content may contain encryption software. The country in which you
+are currently may have restrictions on the import, possession, and use,
+and/or re-export to another country, of encryption software. BEFORE
+using any encryption software, please check the country's laws,
+regulations and policies concerning the import, possession, or use, and
+re-export of encryption software, to see if this is permitted.</p>
+   
+<small>Java and all Java-based trademarks are trademarks of Sun Microsystems, Inc. in the United States, other countries, or both.</small>   
+</body></html>
\ No newline at end of file
diff --git a/lucene/licenses/jetty-continuation-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-continuation-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..40d32c2
--- /dev/null
+++ b/lucene/licenses/jetty-continuation-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+c0e26574ddcac7a86486f19a8b3782657acfd961
diff --git a/lucene/licenses/jetty-http-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-http-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..7cc9164
--- /dev/null
+++ b/lucene/licenses/jetty-http-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+d9eb53007e04d6338f12f3ded60fad1f7bfcb40e
diff --git a/lucene/licenses/jetty-io-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-io-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..2ba8e66
--- /dev/null
+++ b/lucene/licenses/jetty-io-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+e829c768f2b9de5d9fae3bc0aba3996bd0344f56
diff --git a/lucene/licenses/jetty-server-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-server-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..2674565
--- /dev/null
+++ b/lucene/licenses/jetty-server-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+13ca9587bc1645f8fac89454b15252a2ad5bdcf5
diff --git a/lucene/licenses/jetty-servlet-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-servlet-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..ac4faaa
--- /dev/null
+++ b/lucene/licenses/jetty-servlet-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+98f8029fe7236e9c66381c04f292b5319f47ca84
diff --git a/lucene/licenses/jetty-util-8.1.10.v20130312.jar.sha1 b/lucene/licenses/jetty-util-8.1.10.v20130312.jar.sha1
new file mode 100644
index 0000000..cff356d
--- /dev/null
+++ b/lucene/licenses/jetty-util-8.1.10.v20130312.jar.sha1
@@ -0,0 +1 @@
+d198a8ad8ea20b4fb74c781175c48500ec2b8b7a
diff --git a/lucene/licenses/junit4-ant-2.0.10.jar.sha1 b/lucene/licenses/junit4-ant-2.0.10.jar.sha1
new file mode 100644
index 0000000..d63d8dd
--- /dev/null
+++ b/lucene/licenses/junit4-ant-2.0.10.jar.sha1
@@ -0,0 +1 @@
+ca55927404cf0a1a0e078d988222c4feb9dfc01c
diff --git a/lucene/licenses/junit4-ant-2.0.9.jar.sha1 b/lucene/licenses/junit4-ant-2.0.9.jar.sha1
deleted file mode 100644
index 8d50518..0000000
--- a/lucene/licenses/junit4-ant-2.0.9.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bba707f4b0933f782dd456c262dc36f4bac01f45
diff --git a/lucene/licenses/randomizedtesting-runner-2.0.10.jar.sha1 b/lucene/licenses/randomizedtesting-runner-2.0.10.jar.sha1
new file mode 100644
index 0000000..fb7355b
--- /dev/null
+++ b/lucene/licenses/randomizedtesting-runner-2.0.10.jar.sha1
@@ -0,0 +1 @@
+00befdff5ccc24797b46a68819524f42b570e745
diff --git a/lucene/licenses/randomizedtesting-runner-2.0.9.jar.sha1 b/lucene/licenses/randomizedtesting-runner-2.0.9.jar.sha1
deleted file mode 100644
index 22d5067..0000000
--- a/lucene/licenses/randomizedtesting-runner-2.0.9.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b8e918d278f56a18a6044660215290995889bfa
diff --git a/lucene/licenses/servlet-api-3.0.jar.sha1 b/lucene/licenses/servlet-api-3.0.jar.sha1
new file mode 100644
index 0000000..749a2c2
--- /dev/null
+++ b/lucene/licenses/servlet-api-3.0.jar.sha1
@@ -0,0 +1 @@
+0aaaa85845fb5c59da00193f06b8e5278d8bf3f8
diff --git a/lucene/licenses/slf4j-LICENSE-BSD_LIKE.txt b/lucene/licenses/slf4j-LICENSE-BSD_LIKE.txt
new file mode 100644
index 0000000..f5ecafa
--- /dev/null
+++ b/lucene/licenses/slf4j-LICENSE-BSD_LIKE.txt
@@ -0,0 +1,21 @@
+Copyright (c) 2004-2008 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free  of charge, to any person obtaining
+a  copy  of this  software  and  associated  documentation files  (the
+"Software"), to  deal in  the Software without  restriction, including
+without limitation  the rights to  use, copy, modify,  merge, publish,
+distribute,  sublicense, and/or sell  copies of  the Software,  and to
+permit persons to whom the Software  is furnished to do so, subject to
+the following conditions:
+
+The  above  copyright  notice  and  this permission  notice  shall  be
+included in all copies or substantial portions of the Software.
+
+THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
+EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
+MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lucene/licenses/slf4j-NOTICE.txt b/lucene/licenses/slf4j-NOTICE.txt
new file mode 100644
index 0000000..cf43894
--- /dev/null
+++ b/lucene/licenses/slf4j-NOTICE.txt
@@ -0,0 +1,25 @@
+=========================================================================
+==  SLF4J Notice -- http://www.slf4j.org/license.html                  ==
+=========================================================================
+
+Copyright (c) 2004-2008 QOS.ch
+All rights reserved.
+
+Permission is hereby granted, free  of charge, to any person obtaining
+a  copy  of this  software  and  associated  documentation files  (the
+"Software"), to  deal in  the Software without  restriction, including
+without limitation  the rights to  use, copy, modify,  merge, publish,
+distribute,  sublicense, and/or sell  copies of  the Software,  and to
+permit persons to whom the Software  is furnished to do so, subject to
+the following conditions:
+
+The  above  copyright  notice  and  this permission  notice  shall  be
+included in all copies or substantial portions of the Software.
+
+THE  SOFTWARE IS  PROVIDED  "AS  IS", WITHOUT  WARRANTY  OF ANY  KIND,
+EXPRESS OR  IMPLIED, INCLUDING  BUT NOT LIMITED  TO THE  WARRANTIES OF
+MERCHANTABILITY,    FITNESS    FOR    A   PARTICULAR    PURPOSE    AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE,  ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lucene/licenses/slf4j-api-1.6.6.jar.sha1 b/lucene/licenses/slf4j-api-1.6.6.jar.sha1
new file mode 100644
index 0000000..e2e47d0
--- /dev/null
+++ b/lucene/licenses/slf4j-api-1.6.6.jar.sha1
@@ -0,0 +1 @@
+ce53b0a0e2cfbb27e8a59d38f79a18a5c6a8d2b0
diff --git a/lucene/module-build.xml b/lucene/module-build.xml
index a35c0b2..572de31 100644
--- a/lucene/module-build.xml
+++ b/lucene/module-build.xml
@@ -220,7 +220,29 @@
     </ant>
     <property name="facet-javadocs.uptodate" value="true"/>
   </target>
- 
+
+  <property name="replicator.jar" value="${common.dir}/build/replicator/lucene-replicator-${version}.jar"/>
+  <target name="check-replicator-uptodate" unless="replicator.uptodate">
+    <module-uptodate name="replicator" jarfile="${replicator.jar}" property="replicator.uptodate"/>
+  </target>
+  <target name="jar-replicator" unless="replicator.uptodate" depends="check-replicator-uptodate">
+    <ant dir="${common.dir}/replicator" target="jar-core" inheritall="false">
+      <propertyset refid="uptodate.and.compiled.properties"/>
+    </ant>
+    <property name="replicator.uptodate" value="true"/>
+  </target>
+
+  <property name="replicator-javadoc.jar" value="${common.dir}/build/replicator/lucene-replicator-${version}-javadoc.jar"/>
+  <target name="check-replicator-javadocs-uptodate" unless="replicator-javadocs.uptodate">
+    <module-uptodate name="replicator" jarfile="${replicator-javadoc.jar}" property="replicator-javadocs.uptodate"/>
+  </target>
+  <target name="javadocs-replicator" unless="replicator-javadocs.uptodate" depends="check-replicator-javadocs-uptodate">
+    <ant dir="${common.dir}/replicator" target="javadocs" inheritAll="false">
+      <propertyset refid="uptodate.and.compiled.properties"/>
+    </ant>
+    <property name="replicator-javadocs.uptodate" value="true"/>
+  </target>
+
   <property name="analyzers-icu.jar" value="${common.dir}/build/analysis/icu/lucene-analyzers-icu-${version}.jar"/>
   <target name="check-analyzers-icu-uptodate" unless="analyzers-icu.uptodate">
     <module-uptodate name="analysis/icu" jarfile="${analyzers-icu.jar}" property="analyzers-icu.uptodate"/>
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
index 9dc34b6..b1b9273 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
@@ -572,24 +572,53 @@
       if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
         if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
           // no phrase query:
-          BooleanQuery q = newBooleanQuery(positionCount == 1);
-
-          BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
-            BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
-
-          for (int i = 0; i < numTokens; i++) {
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              termAtt.fillBytesRef();
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
+          
+          if (positionCount == 1) {
+            // simple case: only one position, with synonyms
+            BooleanQuery q = newBooleanQuery(true);
+            for (int i = 0; i < numTokens; i++) {
+              try {
+                boolean hasNext = buffer.incrementToken();
+                assert hasNext == true;
+                termAtt.fillBytesRef();
+              } catch (IOException e) {
+                // safe to ignore, because we know the number of tokens
+              }
+              Query currentQuery = newTermQuery(
+                  new Term(field, BytesRef.deepCopyOf(bytes)));
+              q.add(currentQuery, BooleanClause.Occur.SHOULD);
             }
-            Query currentQuery = newTermQuery(
-                new Term(field, BytesRef.deepCopyOf(bytes)));
+            return q;
+          } else {
+            // multiple positions
+            BooleanQuery q = newBooleanQuery(false);
+            final BooleanClause.Occur occur = operator == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
+            Query currentQuery = null;
+            for (int i = 0; i < numTokens; i++) {
+              try {
+                boolean hasNext = buffer.incrementToken();
+                assert hasNext == true;
+                termAtt.fillBytesRef();
+              } catch (IOException e) {
+                // safe to ignore, because we know the number of tokens
+              }
+              if (posIncrAtt != null && posIncrAtt.getPositionIncrement() == 0) {
+                if (!(currentQuery instanceof BooleanQuery)) {
+                  Query t = currentQuery;
+                  currentQuery = newBooleanQuery(true);
+                  ((BooleanQuery)currentQuery).add(t, BooleanClause.Occur.SHOULD);
+                }
+                ((BooleanQuery)currentQuery).add(newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes))), BooleanClause.Occur.SHOULD);
+              } else {
+                if (currentQuery != null) {
+                  q.add(currentQuery, occur);
+                }
+                currentQuery = newTermQuery(new Term(field, BytesRef.deepCopyOf(bytes)));
+              }
+            }
             q.add(currentQuery, occur);
+            return q;
           }
-          return q;
         }
         else {
           // phrase query:
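For reference, the query shapes the new branch produces can be built by hand. A minimal, self-contained sketch mirroring the tests added below (the synonym behavior, "dog" injected at the same position as "dogs", is an assumption of the example; BooleanQuery/TermQuery are the Lucene 4.x APIs this patch uses):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    public class SynonymQueryShape {
      public static void main(String[] args) {
        // position 1: "old" has no synonyms -> a plain TermQuery clause
        TermQuery old = new TermQuery(new Term("field", "old"));

        // position 2: "dogs" plus its zero-position-increment synonym "dog"
        // -> a nested, coord-disabled BooleanQuery of SHOULD clauses
        BooleanQuery dogs = new BooleanQuery(true);
        dogs.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD);
        dogs.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD);

        // outer query: one clause per position; MUST under the AND operator,
        // SHOULD under OR
        BooleanQuery query = new BooleanQuery(false);
        query.add(old, BooleanClause.Occur.MUST);
        query.add(dogs, BooleanClause.Occur.MUST);

        System.out.println(query); // prints: +field:old +(field:dogs field:dog)
      }
    }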
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
index cbd0496..9f26048 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
@@ -17,9 +17,17 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.Reader;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.DateTools.Resolution;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.QueryParser.Operator;
@@ -27,6 +35,7 @@
 import org.apache.lucene.queryparser.util.QueryParserTestBase;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 
@@ -307,4 +316,178 @@
     assertEquals(unexpanded, smart.parse("\"dogs\""));
   }
   
+  // TODO: fold these into QueryParserTestBase
+  
+  /** adds synonym of "dog" for "dogs". */
+  static class MockSynonymAnalyzer extends Analyzer {
+    @Override
+    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+      MockTokenizer tokenizer = new MockTokenizer(reader);
+      return new TokenStreamComponents(tokenizer, new MockSynonymFilter(tokenizer));
+    }
+  }
+  
+  /** simple synonyms test */
+  public void testSynonyms() throws Exception {
+    BooleanQuery expected = new BooleanQuery(true);
+    expected.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD);
+    expected.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockSynonymAnalyzer());
+    assertEquals(expected, qp.parse("dogs"));
+    assertEquals(expected, qp.parse("\"dogs\""));
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("dogs"));
+    assertEquals(expected, qp.parse("\"dogs\""));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("dogs^2"));
+    assertEquals(expected, qp.parse("\"dogs\"^2"));
+  }
+  
+  /** forms multiphrase query */
+  public void testSynonymsPhrase() throws Exception {
+    MultiPhraseQuery expected = new MultiPhraseQuery();
+    expected.add(new Term("field", "old"));
+    expected.add(new Term[] { new Term("field", "dogs"), new Term("field", "dog") });
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockSynonymAnalyzer());
+    assertEquals(expected, qp.parse("\"old dogs\""));
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("\"old dogs\""));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("\"old dogs\"^2"));
+    expected.setSlop(3);
+    assertEquals(expected, qp.parse("\"old dogs\"~3^2"));
+  }
+  
+  /**
+   * adds synonym of "國" for "国".
+   */
+  protected static class MockCJKSynonymFilter extends TokenFilter {
+    CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+    boolean addSynonym = false;
+    
+    public MockCJKSynonymFilter(TokenStream input) {
+      super(input);
+    }
+
+    @Override
+    public final boolean incrementToken() throws IOException {
+      if (addSynonym) { // inject our synonym
+        clearAttributes();
+        termAtt.setEmpty().append("國");
+        posIncAtt.setPositionIncrement(0);
+        addSynonym = false;
+        return true;
+      }
+      
+      if (input.incrementToken()) {
+        addSynonym = termAtt.toString().equals("国");
+        return true;
+      } else {
+        return false;
+      }
+    } 
+  }
+  
+  static class MockCJKSynonymAnalyzer extends Analyzer {
+    @Override
+    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+      Tokenizer tokenizer = new SimpleCJKTokenizer(reader);
+      return new TokenStreamComponents(tokenizer, new MockCJKSynonymFilter(tokenizer));
+    }
+  }
+  
+  /** simple CJK synonym test */
+  public void testCJKSynonym() throws Exception {
+    BooleanQuery expected = new BooleanQuery(true);
+    expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    expected.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    assertEquals(expected, qp.parse("国"));
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("国"));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("国^2"));
+  }
+  
+  /** synonyms with default OR operator */
+  public void testCJKSynonymsOR() throws Exception {
+    BooleanQuery expected = new BooleanQuery();
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
+    BooleanQuery inner = new BooleanQuery(true);
+    inner.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner, BooleanClause.Occur.SHOULD);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    assertEquals(expected, qp.parse("中国"));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("中国^2"));
+  }
+  
+  /** more complex synonyms with default OR operator */
+  public void testCJKSynonymsOR2() throws Exception {
+    BooleanQuery expected = new BooleanQuery();
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
+    BooleanQuery inner = new BooleanQuery(true);
+    inner.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner, BooleanClause.Occur.SHOULD);
+    BooleanQuery inner2 = new BooleanQuery(true);
+    inner2.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner2.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner2, BooleanClause.Occur.SHOULD);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    assertEquals(expected, qp.parse("中国国"));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("中国国^2"));
+  }
+  
+  /** synonyms with default AND operator */
+  public void testCJKSynonymsAND() throws Exception {
+    BooleanQuery expected = new BooleanQuery();
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.MUST);
+    BooleanQuery inner = new BooleanQuery(true);
+    inner.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner, BooleanClause.Occur.MUST);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("中国"));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("中国^2"));
+  }
+  
+  /** more complex synonyms with default AND operator */
+  public void testCJKSynonymsAND2() throws Exception {
+    BooleanQuery expected = new BooleanQuery();
+    expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.MUST);
+    BooleanQuery inner = new BooleanQuery(true);
+    inner.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner, BooleanClause.Occur.MUST);
+    BooleanQuery inner2 = new BooleanQuery(true);
+    inner2.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
+    inner2.add(new TermQuery(new Term("field", "國")), BooleanClause.Occur.SHOULD);
+    expected.add(inner2, BooleanClause.Occur.MUST);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("中国国"));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("中国国^2"));
+  }
+  
+  /** forms multiphrase query */
+  public void testCJKSynonymsPhrase() throws Exception {
+    MultiPhraseQuery expected = new MultiPhraseQuery();
+    expected.add(new Term("field", "中"));
+    expected.add(new Term[] { new Term("field", "国"), new Term("field", "國")});
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCJKSynonymAnalyzer());
+    qp.setDefaultOperator(Operator.AND);
+    assertEquals(expected, qp.parse("\"中国\""));
+    expected.setBoost(2.0f);
+    assertEquals(expected, qp.parse("\"中国\"^2"));
+    expected.setSlop(3);
+    assertEquals(expected, qp.parse("\"中国\"~3^2"));
+  }
+  
 }
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
index 708827c..d6de35a 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
@@ -236,7 +236,7 @@
   }
 
   //individual CJK chars as terms, like StandardAnalyzer
-  private class SimpleCJKTokenizer extends Tokenizer {
+  protected static class SimpleCJKTokenizer extends Tokenizer {
     private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
 
     public SimpleCJKTokenizer(Reader input) {
@@ -244,7 +244,7 @@
     }
 
     @Override
-    public boolean incrementToken() throws IOException {
+    public final boolean incrementToken() throws IOException {
       int ch = input.read();
       if (ch < 0)
         return false;
@@ -1088,7 +1088,7 @@
   /**
    * adds synonym of "dog" for "dogs".
    */
-  private class MockSynonymFilter extends TokenFilter {
+  protected static class MockSynonymFilter extends TokenFilter {
     CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
     boolean addSynonym = false;
diff --git a/lucene/replicator/build.xml b/lucene/replicator/build.xml
new file mode 100644
index 0000000..3786902
--- /dev/null
+++ b/lucene/replicator/build.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<project name="replicator" default="default" xmlns:ivy="antlib:org.apache.ivy.ant">
+
+  <description>
+    Files replication utility
+  </description>
+
+  <import file="../module-build.xml"/>
+
+  <path id="classpath">
+    <fileset dir="lib" />
+    <pathelement path="${facet.jar}"/>
+    <path refid="base.classpath"/>
+  </path>
+
+  <target name="resolve" depends="common.resolve">
+    <sequential>
+      <!-- servlet-api.jar -->
+      <ivy:retrieve conf="servlet" log="download-only" type="orbit" pattern="lib/servlet-api-3.0.jar"/>
+    </sequential>
+  </target>
+
+  <target name="init" depends="module-build.init,jar-facet"/>
+
+  <target name="javadocs" depends="javadocs-facet,compile-core">
+    <invoke-module-javadoc>
+      <links>
+        <link href="../facet"/>
+      </links>
+    </invoke-module-javadoc>
+  </target>
+
+</project>
diff --git a/lucene/replicator/ivy.xml b/lucene/replicator/ivy.xml
new file mode 100644
index 0000000..fe3bd34
--- /dev/null
+++ b/lucene/replicator/ivy.xml
@@ -0,0 +1,50 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing,
+   software distributed under the License is distributed on an
+   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+   KIND, either express or implied.  See the License for the
+   specific language governing permissions and limitations
+   under the License.    
+-->
+<!DOCTYPE ivy-module [
+  <!ENTITY jetty.version "8.1.10.v20130312">
+]>
+<ivy-module version="2.0">
+  <info organisation="org.apache.lucene" module="replicator"/>
+
+  <configurations>
+    <conf name="http" description="httpclient jars"/>
+    <conf name="jetty" description="jetty jars"/>
+    <conf name="start" description="jetty start jar"/>
+    <conf name="servlet" description="servlet-api jar"/>
+    <conf name="logging" description="logging setup"/>
+  </configurations>
+
+  <dependencies>
+    <dependency org="org.apache.httpcomponents" name="httpclient" rev="4.2.3" transitive="false" conf="http->default"/>
+    <dependency org="org.apache.httpcomponents" name="httpcore" rev="4.2.2" transitive="false" conf="http->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-server" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-servlet" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-util" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-io" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-continuation" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.eclipse.jetty" name="jetty-http" rev="&jetty.version;" transitive="false" conf="jetty->default"/>
+    <dependency org="org.slf4j" name="slf4j-api" rev="1.6.6" transitive="false" conf="logging->default"/>
+    <dependency org="org.slf4j" name="jcl-over-slf4j" rev="1.6.6" transitive="false" conf="logging->default"/>
+    <dependency org="org.eclipse.jetty.orbit" name="javax.servlet" rev="3.0.0.v201112011016" transitive="false" conf="servlet->default">
+      <artifact name="javax.servlet" type="orbit" ext="jar"/>
+    </dependency>
+    <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
+  </dependencies>
+
+</ivy-module>
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
new file mode 100755
index 0000000..83aba75
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
@@ -0,0 +1,191 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.InfoStream;
+
+/**
+ * A {@link ReplicationHandler} for replication of an index and taxonomy pair.
+ * See {@link IndexReplicationHandler} for more detail. This handler ensures
+ * that the search and taxonomy indexes are replicated in a consistent way.
+ * <p>
+ * <b>NOTE:</b> if you intend to recreate a taxonomy index, you should make sure
+ * to reopen an IndexSearcher and TaxonomyReader pair via the provided callback,
+ * to guarantee that both indexes are in sync. This handler does not prevent
+ * replicating such index and taxonomy pairs, and if they are reopened by a
+ * different thread, unexpected errors can occur, as well as inconsistency
+ * between the taxonomy and index readers.
+ * 
+ * @see IndexReplicationHandler
+ * 
+ * @lucene.experimental
+ */
+public class IndexAndTaxonomyReplicationHandler implements ReplicationHandler {
+  
+  /**
+   * The component used to log messages to the {@link InfoStream#getDefault()
+   * default} {@link InfoStream}.
+   */
+  public static final String INFO_STREAM_COMPONENT = "IndexAndTaxonomyReplicationHandler";
+
+  private final Directory indexDir;
+  private final Directory taxoDir;
+  private final Callable<Boolean> callback;
+  
+  private volatile Map<String,List<RevisionFile>> currentRevisionFiles;
+  private volatile String currentVersion;
+  private volatile InfoStream infoStream = InfoStream.getDefault();
+
+  /**
+   * Constructor with the given index and taxonomy directories, and a callback
+   * to notify when the indexes have been updated.
+   */
+  public IndexAndTaxonomyReplicationHandler(Directory indexDir, Directory taxoDir, Callable<Boolean> callback)
+      throws IOException {
+    this.callback = callback;
+    this.indexDir = indexDir;
+    this.taxoDir = taxoDir;
+    currentRevisionFiles = null;
+    currentVersion = null;
+    final boolean indexExists = DirectoryReader.indexExists(indexDir);
+    final boolean taxoExists = DirectoryReader.indexExists(taxoDir);
+    if (indexExists != taxoExists) {
+      throw new IllegalStateException("search and taxonomy indexes must either both exist or not: index=" + indexExists
+          + " taxo=" + taxoExists);
+    }
+    if (indexExists) { // both indexes exist
+      final IndexCommit indexCommit = IndexReplicationHandler.getLastCommit(indexDir);
+      final IndexCommit taxoCommit = IndexReplicationHandler.getLastCommit(taxoDir);
+      currentRevisionFiles = IndexAndTaxonomyRevision.revisionFiles(indexCommit, taxoCommit);
+      currentVersion = IndexAndTaxonomyRevision.revisionVersion(indexCommit, taxoCommit);
+      final InfoStream infoStream = InfoStream.getDefault();
+      if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+        infoStream.message(INFO_STREAM_COMPONENT, "constructor(): currentVersion=" + currentVersion
+            + " currentRevisionFiles=" + currentRevisionFiles);
+        infoStream.message(INFO_STREAM_COMPONENT, "constructor(): indexCommit=" + indexCommit
+            + " taxoCommit=" + taxoCommit);
+      }
+    }
+  }
+  
+  @Override
+  public String currentVersion() {
+    return currentVersion;
+  }
+  
+  @Override
+  public Map<String,List<RevisionFile>> currentRevisionFiles() {
+    return currentRevisionFiles;
+  }
+  
+  @Override
+  public void revisionReady(String version, Map<String,List<RevisionFile>> revisionFiles,
+      Map<String,List<String>> copiedFiles, Map<String,Directory> sourceDirectory) throws IOException {
+    Directory taxoClientDir = sourceDirectory.get(IndexAndTaxonomyRevision.TAXONOMY_SOURCE);
+    Directory indexClientDir = sourceDirectory.get(IndexAndTaxonomyRevision.INDEX_SOURCE);
+    List<String> taxoFiles = copiedFiles.get(IndexAndTaxonomyRevision.TAXONOMY_SOURCE);
+    List<String> indexFiles = copiedFiles.get(IndexAndTaxonomyRevision.INDEX_SOURCE);
+    String taxoSegmentsFile = IndexReplicationHandler.getSegmentsFile(taxoFiles, true);
+    String indexSegmentsFile = IndexReplicationHandler.getSegmentsFile(indexFiles, false);
+    
+    boolean success = false;
+    try {
+      // copy taxonomy files before index files
+      IndexReplicationHandler.copyFiles(taxoClientDir, taxoDir, taxoFiles);
+      IndexReplicationHandler.copyFiles(indexClientDir, indexDir, indexFiles);
+
+      // fsync all copied files (except segmentsFile)
+      if (!taxoFiles.isEmpty()) {
+        taxoDir.sync(taxoFiles);
+      }
+      indexDir.sync(indexFiles);
+      
+      // now copy and fsync segmentsFile, taxonomy first because it is ok if a
+      // reader sees a more advanced taxonomy than the index.
+      if (taxoSegmentsFile != null) {
+        taxoClientDir.copy(taxoDir, taxoSegmentsFile, taxoSegmentsFile, IOContext.READONCE);
+      }
+      indexClientDir.copy(indexDir, indexSegmentsFile, indexSegmentsFile, IOContext.READONCE);
+      
+      if (taxoSegmentsFile != null) {
+        taxoDir.sync(Collections.singletonList(taxoSegmentsFile));
+      }
+      indexDir.sync(Collections.singletonList(indexSegmentsFile));
+      
+      success = true;
+    } finally {
+      if (!success) {
+        taxoFiles.add(taxoSegmentsFile); // add it back so it gets deleted too
+        IndexReplicationHandler.cleanupFilesOnFailure(taxoDir, taxoFiles);
+        indexFiles.add(indexSegmentsFile); // add it back so it gets deleted too
+        IndexReplicationHandler.cleanupFilesOnFailure(indexDir, indexFiles);
+      }
+    }
+
+    // all files have been successfully copied + sync'd. update the handler's state
+    currentRevisionFiles = revisionFiles;
+    currentVersion = version;
+    
+    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+      infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion
+          + " currentRevisionFiles=" + currentRevisionFiles);
+    }
+
+    // update the segments.gen file
+    IndexReplicationHandler.writeSegmentsGen(taxoSegmentsFile, taxoDir);
+    IndexReplicationHandler.writeSegmentsGen(indexSegmentsFile, indexDir);
+    
+    // Cleanup the index directory from old and unused index files.
+    // NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have
+    // side-effects, e.g. if it hits sudden IO errors while opening the index
+    // (and can end up deleting the entire index). It is not our job to protect
+    // against those errors, app will probably hit them elsewhere.
+    IndexReplicationHandler.cleanupOldIndexFiles(indexDir, indexSegmentsFile);
+    IndexReplicationHandler.cleanupOldIndexFiles(taxoDir, taxoSegmentsFile);
+
+    // successfully updated the index, notify the callback that the index is
+    // ready.
+    if (callback != null) {
+      try {
+        callback.call();
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  /** Sets the {@link InfoStream} to use for logging messages. */
+  public void setInfoStream(InfoStream infoStream) {
+    if (infoStream == null) {
+      infoStream = InfoStream.NO_OUTPUT;
+    }
+    this.infoStream = infoStream;
+  }
+  
+}
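A minimal client-side sketch of wiring up this handler (the reopen logic is application-specific and only stubbed here; the paths and the surrounding setup are assumptions of the example):

    import java.io.File;
    import java.util.concurrent.Callable;

    import org.apache.lucene.replicator.IndexAndTaxonomyReplicationHandler;
    import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class IndexAndTaxonomyClientSketch {
      public static void main(String[] args) throws Exception {
        Directory indexDir = FSDirectory.open(new File("/path/to/index"));
        Directory taxoDir = FSDirectory.open(new File("/path/to/taxo"));

        // per the class javadoc, the callback should reopen the IndexSearcher
        // and TaxonomyReader together so both views stay in sync
        Callable<Boolean> reopenCallback = new Callable<Boolean>() {
          @Override
          public Boolean call() throws Exception {
            // reopen a searcher/taxonomy-reader pair here
            return true;
          }
        };

        ReplicationHandler handler =
            new IndexAndTaxonomyReplicationHandler(indexDir, taxoDir, reopenCallback);
        // the handler is then handed to the module's replication client, which
        // invokes revisionReady() whenever a new revision has been copied over
      }
    }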
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java
new file mode 100755
index 0000000..dbad317
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java
@@ -0,0 +1,219 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+
+/**
+ * A {@link Revision} of a single index and taxonomy pair, which comprises the
+ * list of files from both indexes. This revision should be used whenever a
+ * pair of search and taxonomy indexes needs to be replicated together, to
+ * guarantee consistency of both on the replicating (client) side.
+ * 
+ * @see IndexRevision
+ * 
+ * @lucene.experimental
+ */
+public class IndexAndTaxonomyRevision implements Revision {
+  
+  /**
+   * A {@link DirectoryTaxonomyWriter} which sets the underlying
+   * {@link IndexWriter}'s {@link IndexDeletionPolicy} to
+   * {@link SnapshotDeletionPolicy}.
+   */
+  public static final class SnapshotDirectoryTaxonomyWriter extends DirectoryTaxonomyWriter {
+    
+    private SnapshotDeletionPolicy sdp;
+    private IndexWriter writer;
+    
+    /**
+     * @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory,
+     *      IndexWriterConfig.OpenMode, TaxonomyWriterCache)
+     */
+    public SnapshotDirectoryTaxonomyWriter(Directory directory, OpenMode openMode, TaxonomyWriterCache cache)
+        throws IOException {
+      super(directory, openMode, cache);
+    }
+    
+    /** @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory, IndexWriterConfig.OpenMode) */
+    public SnapshotDirectoryTaxonomyWriter(Directory directory, OpenMode openMode) throws IOException {
+      super(directory, openMode);
+    }
+    
+    /** @see DirectoryTaxonomyWriter#DirectoryTaxonomyWriter(Directory) */
+    public SnapshotDirectoryTaxonomyWriter(Directory d) throws IOException {
+      super(d);
+    }
+    
+    @Override
+    protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
+      IndexWriterConfig conf = super.createIndexWriterConfig(openMode);
+      conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+      return conf;
+    }
+    
+    @Override
+    protected IndexWriter openIndexWriter(Directory directory, IndexWriterConfig config) throws IOException {
+      writer = super.openIndexWriter(directory, config);
+      // must set it here because IndexWriter clones the config
+      sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
+      return writer;
+    }
+    
+    /** Returns the {@link SnapshotDeletionPolicy} used by the underlying {@link IndexWriter}. */
+    public SnapshotDeletionPolicy getDeletionPolicy() {
+      return sdp;
+    }
+    
+    /** Returns the {@link IndexWriter} used by this {@link DirectoryTaxonomyWriter}. */
+    public IndexWriter getIndexWriter() {
+      return writer;
+    }
+    
+  }
+  
+  private static final int RADIX = 16;
+  
+  /** The key of the search index's files in the {@link #getSourceFiles()} map. */
+  public static final String INDEX_SOURCE = "index";
+
+  /** The key of the taxonomy index's files in the {@link #getSourceFiles()} map. */
+  public static final String TAXONOMY_SOURCE = "taxo";
+  
+  private final IndexWriter indexWriter;
+  private final SnapshotDirectoryTaxonomyWriter taxoWriter;
+  private final IndexCommit indexCommit, taxoCommit;
+  private final SnapshotDeletionPolicy indexSDP, taxoSDP;
+  private final String version;
+  private final Map<String,List<RevisionFile>> sourceFiles;
+  
+  /** Returns a map of the revision files from the given {@link IndexCommit}s of the search and taxonomy indexes. */
+  public static Map<String, List<RevisionFile>> revisionFiles(IndexCommit indexCommit, IndexCommit taxoCommit)
+      throws IOException {
+    HashMap<String,List<RevisionFile>> files = new HashMap<String,List<RevisionFile>>();
+    files.put(INDEX_SOURCE, IndexRevision.revisionFiles(indexCommit).values().iterator().next());
+    files.put(TAXONOMY_SOURCE, IndexRevision.revisionFiles(taxoCommit).values().iterator().next());
+    return files;
+  }
+  
+  /**
+   * Returns a String representation of a revision's version from the given
+   * {@link IndexCommit}s of the search and taxonomy indexes.
+   */
+  public static String revisionVersion(IndexCommit indexCommit, IndexCommit taxoCommit) {
+    return Long.toString(indexCommit.getGeneration(), RADIX) + ":" + Long.toString(taxoCommit.getGeneration(), RADIX);
+  }
+  
+  /**
+   * Constructor over the given {@link IndexWriter} and taxonomy writer. Uses
+   * the last {@link IndexCommit} found in the {@link Directory} managed by
+   * each writer.
+   */
+  public IndexAndTaxonomyRevision(IndexWriter indexWriter, SnapshotDirectoryTaxonomyWriter taxoWriter)
+      throws IOException {
+    IndexDeletionPolicy delPolicy = indexWriter.getConfig().getIndexDeletionPolicy();
+    if (!(delPolicy instanceof SnapshotDeletionPolicy)) {
+      throw new IllegalArgumentException("IndexWriter must be created with SnapshotDeletionPolicy");
+    }
+    this.indexWriter = indexWriter;
+    this.taxoWriter = taxoWriter;
+    this.indexSDP = (SnapshotDeletionPolicy) delPolicy;
+    this.taxoSDP = taxoWriter.getDeletionPolicy();
+    this.indexCommit = indexSDP.snapshot();
+    this.taxoCommit = taxoSDP.snapshot();
+    this.version = revisionVersion(indexCommit, taxoCommit);
+    this.sourceFiles = revisionFiles(indexCommit, taxoCommit);
+  }
+  
+  @Override
+  public int compareTo(String version) {
+    final String[] parts = version.split(":");
+    final long indexGen = Long.parseLong(parts[0], RADIX);
+    final long taxoGen = Long.parseLong(parts[1], RADIX);
+    final long indexCommitGen = indexCommit.getGeneration();
+    final long taxoCommitGen = taxoCommit.getGeneration();
+    
+    // if the index generation is not the same as this commit's generation,
+    // compare by it. Otherwise, compare by the taxonomy generation.
+    if (indexCommitGen < indexGen) {
+      return -1;
+    } else if (indexCommitGen > indexGen) {
+      return 1;
+    } else {
+      return taxoCommitGen < taxoGen ? -1 : (taxoCommitGen > taxoGen ? 1 : 0);
+    }
+  }
+  
+  @Override
+  public int compareTo(Revision o) {
+    IndexAndTaxonomyRevision other = (IndexAndTaxonomyRevision) o;
+    int cmp = indexCommit.compareTo(other.indexCommit);
+    return cmp != 0 ? cmp : taxoCommit.compareTo(other.taxoCommit);
+  }
+  
+  @Override
+  public String getVersion() {
+    return version;
+  }
+  
+  @Override
+  public Map<String,List<RevisionFile>> getSourceFiles() {
+    return sourceFiles;
+  }
+  
+  @Override
+  public InputStream open(String source, String fileName) throws IOException {
+    assert source.equals(INDEX_SOURCE) || source.equals(TAXONOMY_SOURCE) :
+        "invalid source; expected=(" + INDEX_SOURCE + " or " + TAXONOMY_SOURCE + ") got=" + source;
+    IndexCommit ic = source.equals(INDEX_SOURCE) ? indexCommit : taxoCommit;
+    return new IndexInputInputStream(ic.getDirectory().openInput(fileName, IOContext.READONCE));
+  }
+  
+  @Override
+  public void release() throws IOException {
+    try {
+      indexSDP.release(indexCommit);
+    } finally {
+      taxoSDP.release(taxoCommit);
+    }
+    
+    try {
+      indexWriter.deleteUnusedFiles();
+    } finally {
+      taxoWriter.getIndexWriter().deleteUnusedFiles();
+    }
+  }
+  
+  @Override
+  public String toString() {
+    return "IndexAndTaxonomyRevision version=" + version + " files=" + sourceFiles;
+  }
+  
+}
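The matching publisher-side sketch: snapshot a consistent index+taxonomy pair and publish it as one revision. Replicator is the module's publishing interface from elsewhere in this patch, and the Version constant is an assumption of the example. Note that revisionVersion() encodes the two commit generations in hex joined by ':' (e.g. "3:1"), and compareTo() orders by index generation first, then taxonomy generation:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.SnapshotDeletionPolicy;
    import org.apache.lucene.replicator.IndexAndTaxonomyRevision;
    import org.apache.lucene.replicator.IndexAndTaxonomyRevision.SnapshotDirectoryTaxonomyWriter;
    import org.apache.lucene.replicator.Replicator;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.Version;

    public class PublishRevisionSketch {
      public static void publish(Directory indexDir, Directory taxoDir,
          Analyzer analyzer, Replicator replicator) throws Exception {
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, analyzer);
        // IndexAndTaxonomyRevision requires a SnapshotDeletionPolicy on the writer
        conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
        IndexWriter indexWriter = new IndexWriter(indexDir, conf);
        SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);

        // ... index documents, then commit the taxonomy before the index ...
        taxoWriter.commit();
        indexWriter.commit();

        // snapshot both commits and publish them as a single revision
        replicator.publish(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
      }
    }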
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java
new file mode 100755
index 0000000..3291194
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexInputInputStream.java
@@ -0,0 +1,92 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.lucene.store.IndexInput;
+
+/** 
+ * An {@link InputStream} which wraps an {@link IndexInput}.
+ * 
+ * @lucene.experimental
+ */
+public final class IndexInputInputStream extends InputStream {
+  
+  private final IndexInput in;
+  
+  private long remaining;
+  
+  public IndexInputInputStream(IndexInput in) {
+    this.in = in;
+    remaining = in.length();
+  }
+  
+  @Override
+  public int read() throws IOException {
+    if (remaining == 0) {
+      return -1;
+    } else {
+      --remaining;
+      // mask the signed byte so the InputStream contract (a value in 0-255,
+      // or -1 at EOF) is honored
+      return in.readByte() & 0xFF;
+    }
+  }
+  
+  @Override
+  public int available() throws IOException {
+    // report the bytes left to read, not the file's total length
+    return (int) remaining;
+  }
+  
+  @Override
+  public void close() throws IOException {
+    in.close();
+  }
+  
+  @Override
+  public int read(byte[] b) throws IOException {
+    return read(b, 0, b.length);
+  }
+  
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    if (remaining == 0) {
+      return -1;
+    }
+    if (remaining < len) {
+      len = (int) remaining;
+    }
+    in.readBytes(b, off, len);
+    remaining -= len;
+    return len;
+  }
+  
+  @Override
+  public long skip(long n) throws IOException {
+    if (remaining == 0) {
+      // per the InputStream contract, skip() returns 0 (not -1) at EOF
+      return 0;
+    }
+    if (remaining < n) {
+      n = remaining;
+    }
+    in.seek(in.getFilePointer() + n);
+    remaining -= n;
+    return n;
+  }
+  
+}
\ No newline at end of file
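A small usage sketch for the wrapper, essentially what Revision.open() does above plus a copy loop (the helper class and its arguments are illustrative only):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.lucene.replicator.IndexInputInputStream;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;

    public class StreamIndexFile {
      /** Copies a single index file out of a Directory into any OutputStream. */
      public static void copyFile(Directory dir, String fileName, OutputStream out)
          throws IOException {
        InputStream in = new IndexInputInputStream(dir.openInput(fileName, IOContext.READONCE));
        try {
          byte[] buf = new byte[4096];
          int numRead;
          while ((numRead = in.read(buf)) != -1) {
            out.write(buf, 0, numRead);
          }
        } finally {
          in.close(); // also closes the underlying IndexInput
        }
      }
    }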
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
new file mode 100755
index 0000000..325c96d
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
@@ -0,0 +1,308 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.regex.Matcher;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexNotFoundException;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.InfoStream;
+
+/**
+ * A {@link ReplicationHandler} for replication of an index. Implements
+ * {@link #revisionReady} by copying the revision's files to the index
+ * {@link Directory} and then making sure any old, unused index files are
+ * deleted (see {@link #cleanupOldIndexFiles}).
+ * <p>
+ * <b>NOTE:</b> this handler assumes that {@link IndexWriter} is not opened by
+ * another process on the index directory. In fact, opening an
+ * {@link IndexWriter} on the same directory to which files are copied can lead
+ * to undefined behavior: some or all of the files may be deleted, other files
+ * may be overwritten, or the index may end up in an inconsistent state. When
+ * replicating an index, it is best if the index is never modified by an
+ * {@link IndexWriter}, other than the one that is open on the source index
+ * from which you replicate.
+ * <p>
+ * This handler notifies the application via a provided {@link Callable} when an
+ * updated index commit has been made available to it.
+ * 
+ * @lucene.experimental
+ */
+public class IndexReplicationHandler implements ReplicationHandler {
+  
+  /**
+   * The component used to log messages to the {@link InfoStream#getDefault()
+   * default} {@link InfoStream}.
+   */
+  public static final String INFO_STREAM_COMPONENT = "IndexReplicationHandler";
+  
+  private final Directory indexDir;
+  private final Callable<Boolean> callback;
+  
+  private volatile Map<String,List<RevisionFile>> currentRevisionFiles;
+  private volatile String currentVersion;
+  private volatile InfoStream infoStream = InfoStream.getDefault();
+  
+  /**
+   * Returns the last {@link IndexCommit} found in the {@link Directory}, or
+   * {@code null} if there are no commits.
+   */
+  public static IndexCommit getLastCommit(Directory dir) throws IOException {
+    try {
+      if (DirectoryReader.indexExists(dir)) {
+        List<IndexCommit> commits = DirectoryReader.listCommits(dir);
+        // listCommits guarantees that we get at least one commit back, or
+        // IndexNotFoundException which we handle below
+        return commits.get(commits.size() - 1);
+      }
+    } catch (IndexNotFoundException e) {
+      // ignore the exception and return null
+    }
+    return null;
+  }
+  
+  /**
+   * Verifies that the last file in the list is segments_N and fails otherwise.
+   * It also removes the file from the list and returns it, because it needs to
+   * be handled last, after all other files. This is important in order to
+   * guarantee that if a reader sees the new segments_N, all other segment
+   * files are already on stable storage.
+   * <p>
+   * The reason the code fails, rather than moving the segments_N file to the
+   * end itself, is that a wrong order indicates an error in the Revision
+   * implementation.
+   */
+  public static String getSegmentsFile(List<String> files, boolean allowEmpty) {
+    if (files.isEmpty()) {
+      if (allowEmpty) {
+        return null;
+      } else {
+        throw new IllegalStateException("empty list of files not allowed");
+      }
+    }
+    
+    String segmentsFile = files.remove(files.size() - 1);
+    if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS) || segmentsFile.equals(IndexFileNames.SEGMENTS_GEN)) {
+      throw new IllegalStateException("last file to copy+sync must be segments_N but got " + segmentsFile
+          + "; check your Revision implementation!");
+    }
+    return segmentsFile;
+  }
+
+  /**
+   * Cleans up the index directory by deleting all the given files. Called when
+   * a file copy or sync failed.
+   */
+  public static void cleanupFilesOnFailure(Directory dir, List<String> files) {
+    for (String file : files) {
+      try {
+        if (dir.fileExists(file)) {
+          dir.deleteFile(file);
+        }
+      } catch (Throwable t) {
+        // suppress any exception because if we're here, it means copy
+        // failed, and we must cleanup after ourselves.
+      }
+    }
+  }
+  
+  /**
+   * Removes old index files from the index directory. This method uses the
+   * last commit found by {@link #getLastCommit(Directory)}. If it matches the
+   * expected segmentsFile, then all files not referenced by this commit point
+   * are deleted.
+   * <p>
+   * <b>NOTE:</b> this method makes a best-effort attempt to clean the index
+   * directory. It suppresses any exceptions that occur, as the cleanup can be
+   * retried the next time a new revision arrives.
+   */
+  public static void cleanupOldIndexFiles(Directory dir, String segmentsFile) {
+    try {
+      IndexCommit commit = getLastCommit(dir);
+      // commit == null means weird IO errors occurred; ignore them.
+      // if there were any IO errors reading the expected commit point (i.e.
+      // segments files mismatch), ignore that commit too.
+      if (commit != null && commit.getSegmentsFileName().equals(segmentsFile)) {
+        Set<String> commitFiles = new HashSet<String>();
+        commitFiles.addAll(commit.getFileNames());
+        commitFiles.add(IndexFileNames.SEGMENTS_GEN);
+        Matcher matcher = IndexFileNames.CODEC_FILE_PATTERN.matcher("");
+        for (String file : dir.listAll()) {
+          if (!commitFiles.contains(file)
+              && (matcher.reset(file).matches() || file.startsWith(IndexFileNames.SEGMENTS))) {
+            try {
+              dir.deleteFile(file);
+            } catch (Throwable t) {
+              // suppress, it's just a best effort
+            }
+          }
+        }
+      }
+    } catch (Throwable t) {
+      // ignore any errors that happen at this stage; this cleanup will have a
+      // chance to succeed the next time we get a new revision.
+    }
+  }
+  
+  /**
+   * Copies the files from the source directory to the target one, if they are
+   * not the same.
+   */
+  public static void copyFiles(Directory source, Directory target, List<String> files) throws IOException {
+    if (!source.equals(target)) {
+      for (String file : files) {
+        source.copy(target, file, file, IOContext.READONCE);
+      }
+    }
+  }
+
+  /**
+   * Writes {@link IndexFileNames#SEGMENTS_GEN} file to the directory, reading
+   * the generation from the given {@code segmentsFile}. If it is {@code null},
+   * this method deletes segments.gen from the directory.
+   */
+  public static void writeSegmentsGen(String segmentsFile, Directory dir) {
+    if (segmentsFile != null) {
+      SegmentInfos.writeSegmentsGen(dir, SegmentInfos.generationFromSegmentsFileName(segmentsFile));
+    } else {
+      try {
+        if (dir.fileExists(IndexFileNames.SEGMENTS_GEN)) {
+          dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
+        }
+      } catch (Throwable t) {
+        // suppress any errors while deleting this file.
+      }
+    }
+  }
+
+  /**
+   * Constructor with the given index directory and a callback to notify when
+   * the index has been updated.
+   */
+  public IndexReplicationHandler(Directory indexDir, Callable<Boolean> callback) throws IOException {
+    this.callback = callback;
+    this.indexDir = indexDir;
+    currentRevisionFiles = null;
+    currentVersion = null;
+    if (DirectoryReader.indexExists(indexDir)) {
+      final List<IndexCommit> commits = DirectoryReader.listCommits(indexDir);
+      final IndexCommit commit = commits.get(commits.size() - 1);
+      currentRevisionFiles = IndexRevision.revisionFiles(commit);
+      currentVersion = IndexRevision.revisionVersion(commit);
+      final InfoStream infoStream = InfoStream.getDefault();
+      if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+        infoStream.message(INFO_STREAM_COMPONENT, "constructor(): currentVersion=" + currentVersion
+            + " currentRevisionFiles=" + currentRevisionFiles);
+        infoStream.message(INFO_STREAM_COMPONENT, "constructor(): commit=" + commit);
+      }
+    }
+  }
+  
+  @Override
+  public String currentVersion() {
+    return currentVersion;
+  }
+  
+  @Override
+  public Map<String,List<RevisionFile>> currentRevisionFiles() {
+    return currentRevisionFiles;
+  }
+  
+  @Override
+  public void revisionReady(String version, Map<String,List<RevisionFile>> revisionFiles,
+      Map<String,List<String>> copiedFiles, Map<String,Directory> sourceDirectory) throws IOException {
+    if (revisionFiles.size() > 1) {
+      throw new IllegalArgumentException("this handler handles only a single source; got " + revisionFiles.keySet());
+    }
+    
+    Directory clientDir = sourceDirectory.values().iterator().next();
+    List<String> files = copiedFiles.values().iterator().next();
+    String segmentsFile = getSegmentsFile(files, false);
+    
+    boolean success = false;
+    try {
+      // copy files from the client to index directory
+      copyFiles(clientDir, indexDir, files);
+      
+      // fsync all copied files (except segmentsFile)
+      indexDir.sync(files);
+      
+      // now copy and fsync segmentsFile
+      clientDir.copy(indexDir, segmentsFile, segmentsFile, IOContext.READONCE);
+      indexDir.sync(Collections.singletonList(segmentsFile));
+      
+      success = true;
+    } finally {
+      if (!success) {
+        files.add(segmentsFile); // add it back so it gets deleted too
+        cleanupFilesOnFailure(indexDir, files);
+      }
+    }
+
+    // all files have been successfully copied + sync'd. update the handler's state
+    currentRevisionFiles = revisionFiles;
+    currentVersion = version;
+    
+    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+      infoStream.message(INFO_STREAM_COMPONENT, "revisionReady(): currentVersion=" + currentVersion
+          + " currentRevisionFiles=" + currentRevisionFiles);
+    }
+
+    // update the segments.gen file
+    writeSegmentsGen(segmentsFile, indexDir);
+    
+    // Cleanup the index directory from old and unused index files.
+    // NOTE: we don't use IndexWriter.deleteUnusedFiles here since it may have
+    // side-effects, e.g. if it hits sudden IO errors while opening the index
+    // (and can end up deleting the entire index). It is not our job to protect
+    // against those errors, app will probably hit them elsewhere.
+    cleanupOldIndexFiles(indexDir, segmentsFile);
+
+    // successfully updated the index, notify the callback that the index is
+    // ready.
+    if (callback != null) {
+      try {
+        callback.call();
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  /** Sets the {@link InfoStream} to use for logging messages. */
+  public void setInfoStream(InfoStream infoStream) {
+    if (infoStream == null) {
+      infoStream = InfoStream.NO_OUTPUT;
+    }
+    this.infoStream = infoStream;
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java
new file mode 100755
index 0000000..d135a3d
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java
@@ -0,0 +1,150 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+
+/**
+ * A {@link Revision} of a single index, which comprises the list of files
+ * that are part of the current {@link IndexCommit}. To ensure the files are not
+ * deleted by {@link IndexWriter} for as long as this revision stays alive (i.e.
+ * until {@link #release()}), the current commit point is snapshotted, using
+ * {@link SnapshotDeletionPolicy} (this means that the given writer's
+ * {@link IndexWriterConfig#getIndexDeletionPolicy() config} should return
+ * {@link SnapshotDeletionPolicy}).
+ * <p>
+ * When this revision is {@link #release() released}, it releases the obtained
+ * snapshot as well as calls {@link IndexWriter#deleteUnusedFiles()} so that the
+ * snapshotted files are deleted (if they are no longer needed).
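+ * <p>
+ * A minimal usage sketch, assuming a writer already configured with
+ * {@link SnapshotDeletionPolicy} (the {@code replicator} variable is
+ * illustrative):
+ * <pre>
+ * IndexWriter writer = ...; // created with SnapshotDeletionPolicy
+ * writer.commit();
+ * Revision rev = new IndexRevision(writer);
+ * replicator.publish(rev); // the replicator now owns the snapshot
+ * </pre>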
+ * 
+ * @lucene.experimental
+ */
+public class IndexRevision implements Revision {
+  
+  private static final int RADIX = 16;
+  private static final String SOURCE = "index";
+  
+  private final IndexWriter writer;
+  private final IndexCommit commit;
+  private final SnapshotDeletionPolicy sdp;
+  private final String version;
+  private final Map<String,List<RevisionFile>> sourceFiles;
+  
+  // returns a RevisionFile with some metadata
+  private static RevisionFile newRevisionFile(String file, Directory dir) throws IOException {
+    RevisionFile revFile = new RevisionFile(file);
+    revFile.size = dir.fileLength(file);
+    return revFile;
+  }
+  
+  /** Returns a singleton map of the revision files from the given {@link IndexCommit}. */
+  public static Map<String,List<RevisionFile>> revisionFiles(IndexCommit commit) throws IOException {
+    Collection<String> commitFiles = commit.getFileNames();
+    List<RevisionFile> revisionFiles = new ArrayList<RevisionFile>(commitFiles.size());
+    String segmentsFile = commit.getSegmentsFileName();
+    Directory dir = commit.getDirectory();
+    
+    for (String file : commitFiles) {
+      if (!file.equals(segmentsFile)) {
+        revisionFiles.add(newRevisionFile(file, dir));
+      }
+    }
+    revisionFiles.add(newRevisionFile(segmentsFile, dir)); // segments_N must be last
+    return Collections.singletonMap(SOURCE, revisionFiles);
+  }
+  
+  /**
+   * Returns a String representation of a revision's version from the given
+   * {@link IndexCommit}.
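+   * The version is the commit's generation rendered in base 16; for example, a
+   * commit with generation 10 yields the version {@code "a"}.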
+   */
+  public static String revisionVersion(IndexCommit commit) {
+    return Long.toString(commit.getGeneration(), RADIX);
+  }
+  
+  /**
+   * Constructor over the given {@link IndexWriter}. Uses the last
+   * {@link IndexCommit} found in the {@link Directory} managed by the given
+   * writer.
+   */
+  public IndexRevision(IndexWriter writer) throws IOException {
+    IndexDeletionPolicy delPolicy = writer.getConfig().getIndexDeletionPolicy();
+    if (!(delPolicy instanceof SnapshotDeletionPolicy)) {
+      throw new IllegalArgumentException("IndexWriter must be created with SnapshotDeletionPolicy");
+    }
+    this.writer = writer;
+    this.sdp = (SnapshotDeletionPolicy) delPolicy;
+    this.commit = sdp.snapshot();
+    this.version = revisionVersion(commit);
+    this.sourceFiles = revisionFiles(commit);
+  }
+  
+  @Override
+  public int compareTo(String version) {
+    long gen = Long.parseLong(version, RADIX);
+    long commitGen = commit.getGeneration();
+    return commitGen < gen ? -1 : (commitGen > gen ? 1 : 0);
+  }
+  
+  @Override
+  public int compareTo(Revision o) {
+    IndexRevision other = (IndexRevision) o;
+    return commit.compareTo(other.commit);
+  }
+  
+  @Override
+  public String getVersion() {
+    return version;
+  }
+  
+  @Override
+  public Map<String,List<RevisionFile>> getSourceFiles() {
+    return sourceFiles;
+  }
+  
+  @Override
+  public InputStream open(String source, String fileName) throws IOException {
+    assert source.equals(SOURCE) : "invalid source; expected=" + SOURCE + " got=" + source;
+    return new IndexInputInputStream(commit.getDirectory().openInput(fileName, IOContext.READONCE));
+  }
+  
+  @Override
+  public void release() throws IOException {
+    sdp.release(commit);
+    writer.deleteUnusedFiles();
+  }
+  
+  @Override
+  public String toString() {
+    return "IndexRevision version=" + version + " files=" + sourceFiles;
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java
new file mode 100755
index 0000000..4ab746c
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java
@@ -0,0 +1,247 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.store.AlreadyClosedException;
+
+/**
+ * A {@link Replicator} implementation for use by the side that publishes
+ * {@link Revision}s, as well as for clients to {@link #checkForUpdate(String)
+ * check for updates}. When a client needs to be updated, it is returned a
+ * {@link SessionToken} through which it can
+ * {@link #obtainFile(String, String, String) obtain} the files of that
+ * revision. As long as a revision is being replicated, this replicator
+ * guarantees that it will not be {@link Revision#release() released}.
+ * <p>
+ * Replication sessions expire by default after
+ * {@link #DEFAULT_SESSION_EXPIRATION_THRESHOLD}, and the threshold can be
+ * configured through {@link #setExpirationThreshold(long)}.
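+ * <p>
+ * A minimal publishing sketch, assuming an {@code IndexWriter} {@code writer}
+ * configured with {@code SnapshotDeletionPolicy} (names are illustrative):
+ * <pre>
+ * Replicator replicator = new LocalReplicator();
+ * replicator.publish(new IndexRevision(writer)); // replicator now owns the snapshot
+ * SessionToken session = replicator.checkForUpdate(null); // null: client has no version yet
+ * InputStream in = replicator.obtainFile(session.id, source, fileName);
+ * // ... copy the file, then release the session:
+ * replicator.release(session.id);
+ * </pre>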
+ * 
+ * @lucene.experimental
+ */
+public class LocalReplicator implements Replicator {
+  
+  private static class RefCountedRevision {
+    private final AtomicInteger refCount = new AtomicInteger(1);
+    public final Revision revision;
+    
+    public RefCountedRevision(Revision revision) {
+      this.revision = revision;
+    }
+    
+    public void decRef() throws IOException {
+      if (refCount.get() <= 0) {
+        throw new IllegalStateException("this revision is already released");
+      }
+      
+      final int rc = refCount.decrementAndGet();
+      if (rc == 0) {
+        boolean success = false;
+        try {
+          revision.release();
+          success = true;
+        } finally {
+          if (!success) {
+            // Put reference back on failure
+            refCount.incrementAndGet();
+          }
+        }
+      } else if (rc < 0) {
+        throw new IllegalStateException("too many decRef calls: refCount is " + rc + " after decrement");
+      }
+    }
+    
+    public void incRef() {
+      refCount.incrementAndGet();
+    }
+    
+  }
+  
+  private static class ReplicationSession {
+    public final SessionToken session;
+    public final RefCountedRevision revision;
+    private volatile long lastAccessTime;
+    
+    ReplicationSession(SessionToken session, RefCountedRevision revision) {
+      this.session = session;
+      this.revision = revision;
+      lastAccessTime = System.currentTimeMillis();
+    }
+    
+    boolean isExpired(long expirationThreshold) {
+      return lastAccessTime < (System.currentTimeMillis() - expirationThreshold);
+    }
+    
+    void markAccessed() {
+      lastAccessTime = System.currentTimeMillis();
+    }
+  }
+  
+  /** Threshold for expiring inactive sessions. Defaults to 30 minutes. */
+  public static final long DEFAULT_SESSION_EXPIRATION_THRESHOLD = 1000 * 60 * 30;
+  
+  private long expirationThresholdMillis = LocalReplicator.DEFAULT_SESSION_EXPIRATION_THRESHOLD;
+  
+  private volatile RefCountedRevision currentRevision;
+  private volatile boolean closed = false;
+  
+  private final AtomicInteger sessionToken = new AtomicInteger(0);
+  private final Map<String, ReplicationSession> sessions = new HashMap<String, ReplicationSession>();
+  
+  private void checkExpiredSessions() throws IOException {
+    // make a "to-delete" list so we don't risk deleting from the map while iterating it
+    final ArrayList<ReplicationSession> toExpire = new ArrayList<ReplicationSession>();
+    for (ReplicationSession token : sessions.values()) {
+      if (token.isExpired(expirationThresholdMillis)) {
+        toExpire.add(token);
+      }
+    }
+    for (ReplicationSession token : toExpire) {
+      releaseSession(token.session.id);
+    }
+  }
+  
+  private void releaseSession(String sessionID) throws IOException {
+    ReplicationSession session = sessions.remove(sessionID);
+    // if we're called concurrently by close() and release(), could be that one
+    // thread beats the other to release the session.
+    if (session != null) {
+      session.revision.decRef();
+    }
+  }
+  
+  /** Ensures that the replicator is still open, throwing {@link AlreadyClosedException} otherwise. */
+  protected final synchronized void ensureOpen() {
+    if (closed) {
+      throw new AlreadyClosedException("This replicator has already been closed");
+    }
+  }
+  
+  @Override
+  public synchronized SessionToken checkForUpdate(String currentVersion) {
+    ensureOpen();
+    if (currentRevision == null) { // no published revisions yet
+      return null;
+    }
+    
+    if (currentVersion != null && currentRevision.revision.compareTo(currentVersion) <= 0) {
+      // currentVersion is newer or equal to latest published revision
+      return null;
+    }
+    
+    // currentVersion is either null or older than latest published revision
+    currentRevision.incRef();
+    final String sessionID = Integer.toString(sessionToken.incrementAndGet());
+    final SessionToken token = new SessionToken(sessionID, currentRevision.revision);
+    final ReplicationSession timedSessionToken = new ReplicationSession(token, currentRevision);
+    sessions.put(sessionID, timedSessionToken);
+    return token;
+  }
+  
+  @Override
+  public synchronized void close() throws IOException {
+    if (!closed) {
+      // release all managed revisions
+      for (ReplicationSession session : sessions.values()) {
+        session.revision.decRef();
+      }
+      sessions.clear();
+      closed = true;
+    }
+  }
+  
+  /**
+   * Returns the expiration threshold.
+   * 
+   * @see #setExpirationThreshold(long)
+   */
+  public long getExpirationThreshold() {
+    return expirationThresholdMillis;
+  }
+  
+  @Override
+  public synchronized InputStream obtainFile(String sessionID, String source, String fileName) throws IOException {
+    ensureOpen();
+    ReplicationSession session = sessions.get(sessionID);
+    if (session != null && session.isExpired(expirationThresholdMillis)) {
+      releaseSession(sessionID);
+      session = null;
+    }
+    // session either previously expired, or we just expired it
+    if (session == null) {
+      throw new SessionExpiredException("session (" + sessionID + ") expired while obtaining file: source=" + source
+          + " file=" + fileName);
+    }
+    session.markAccessed();
+    return session.revision.revision.open(source, fileName);
+  }
+  
+  @Override
+  public synchronized void publish(Revision revision) throws IOException {
+    ensureOpen();
+    if (currentRevision != null) {
+      int compare = revision.compareTo(currentRevision.revision);
+      if (compare == 0) {
+        // same revision published again, ignore but release it
+        revision.release();
+        return;
+      }
+      
+      if (compare < 0) {
+        revision.release();
+        throw new IllegalArgumentException("Cannot publish an older revision: rev=" + revision + " current="
+            + currentRevision);
+      } 
+    }
+    
+    // swap revisions
+    final RefCountedRevision oldRevision = currentRevision;
+    currentRevision = new RefCountedRevision(revision);
+    if (oldRevision != null) {
+      oldRevision.decRef();
+    }
+    
+    // check for expired sessions
+    checkExpiredSessions();
+  }
+  
+  @Override
+  public synchronized void release(String sessionID) throws IOException {
+    ensureOpen();
+    releaseSession(sessionID);
+  }
+  
+  /**
+   * Modify the session expiration time. If a replication session has been
+   * inactive for this long, it is automatically expired, and further attempts
+   * to operate within it will throw a {@link SessionExpiredException}.
+   */
+  public synchronized void setExpirationThreshold(long expirationThreshold) throws IOException {
+    ensureOpen();
+    this.expirationThresholdMillis = expirationThreshold;
+    checkExpiredSessions();
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java b/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java
new file mode 100755
index 0000000..3dcd1b3
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/PerSessionDirectoryFactory.java
@@ -0,0 +1,77 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+
+/**
+ * A {@link SourceDirectoryFactory} which returns {@link FSDirectory} under a
+ * dedicated session directory. When a session is over, the entire directory is
+ * deleted.
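+ * <p>
+ * For example (the working directory below is illustrative):
+ * <pre>
+ * SourceDirectoryFactory factory = new PerSessionDirectoryFactory(new File("/tmp/replica-work"));
+ * </pre>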
+ * 
+ * @lucene.experimental
+ */
+public class PerSessionDirectoryFactory implements SourceDirectoryFactory {
+  
+  private final File workDir;
+  
+  /** Constructor with the given sources mapping. */
+  public PerSessionDirectoryFactory(File workDir) {
+    this.workDir = workDir;
+  }
+  
+  private void rm(File file) throws IOException {
+    if (file.isDirectory()) {
+      for (File f : file.listFiles()) {
+        rm(f);
+      }
+    }
+    
+    // This should be either an empty directory, or a file
+    if (!file.delete() && file.exists()) {
+      throw new IOException("failed to delete " + file);
+    }
+  }
+  
+  @Override
+  public Directory getDirectory(String sessionID, String source) throws IOException {
+    File sessionDir = new File(workDir, sessionID);
+    if (!sessionDir.exists() && !sessionDir.mkdirs()) {
+      throw new IOException("failed to create session directory " + sessionDir);
+    }
+    File sourceDir = new File(sessionDir, source);
+    if (!sourceDir.mkdirs()) {
+      throw new IOException("failed to create source directory " + sourceDir);
+    }
+    return FSDirectory.open(sourceDir);
+  }
+  
+  @Override
+  public void cleanupSession(String sessionID) throws IOException {
+    if (sessionID.isEmpty()) { // protect against deleting workDir entirely!
+      throw new IllegalArgumentException("sessionID cannot be empty");
+    }
+    rm(new File(workDir, sessionID));
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java
new file mode 100755
index 0000000..9397351
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java
@@ -0,0 +1,416 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.InfoStream;
+import org.apache.lucene.util.ThreadInterruptedException;
+
+/**
+ * A client which monitors and obtains new revisions from a {@link Replicator}.
+ * It can be used to either periodically check for updates by invoking
+ * {@link #startUpdateThread}, or manually by calling {@link #updateNow()}.
+ * <p>
+ * Whenever a new revision is available, the {@link #requiredFiles(Map)
+ * required files} are copied to the {@link Directory} specified by the
+ * {@link SourceDirectoryFactory} and the handler is notified.
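+ * <p>
+ * A minimal wiring sketch (the directories are illustrative):
+ * <pre>
+ * Replicator replicator = new LocalReplicator(); // or an HTTP-based replicator
+ * ReplicationHandler handler = new IndexReplicationHandler(indexDir, null); // null: no callback
+ * SourceDirectoryFactory factory = new PerSessionDirectoryFactory(workDir);
+ * ReplicationClient client = new ReplicationClient(replicator, handler, factory);
+ * client.updateNow(); // or client.startUpdateThread(60000, "replication");
+ * </pre>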
+ * 
+ * @lucene.experimental
+ */
+public class ReplicationClient implements Closeable {
+  
+  private class ReplicationThread extends Thread {
+    
+    private final long interval;
+    
+    // client uses this to stop us
+    final CountDownLatch stop = new CountDownLatch(1);
+    
+    public ReplicationThread(long interval) {
+      this.interval = interval;
+    }
+    
+    @SuppressWarnings("synthetic-access")
+    @Override
+    public void run() {
+      while (true) {
+        long time = System.currentTimeMillis();
+        updateLock.lock();
+        try {
+          doUpdate();
+        } catch (Throwable t) {
+          handleUpdateException(t);
+        } finally {
+          updateLock.unlock();
+        }
+        time = System.currentTimeMillis() - time;
+        
+        // adjust the timeout to compensate for the time spent doing the replication.
+        final long timeout = interval - time;
+        if (timeout > 0) {
+          try {
+            // this will return immediately if we were ordered to stop (count=0)
+            // or the timeout has elapsed. if it returns true, it means count=0,
+            // so terminate.
+            if (stop.await(timeout, TimeUnit.MILLISECONDS)) {
+              return;
+            }
+          } catch (InterruptedException e) {
+            // if we were interrupted, somebody wants to terminate us, so just
+            // rethrow the exception.
+            Thread.currentThread().interrupt();
+            throw new ThreadInterruptedException(e);
+          }
+        }
+      }
+    }
+    
+  }
+  
+  /** Handler for revisions obtained by the client. */
+  public static interface ReplicationHandler {
+    
+    /** Returns the current revision files held by the handler. */
+    public Map<String,List<RevisionFile>> currentRevisionFiles();
+    
+    /** Returns the current revision version held by the handler. */
+    public String currentVersion();
+    
+    /**
+     * Called when a new revision was obtained and is available (i.e. all needed
+     * files were successfully copied).
+     * 
+     * @param version
+     *          the version of the {@link Revision} that was copied
+     * @param revisionFiles
+     *          the files contained by this {@link Revision}
+     * @param copiedFiles
+     *          the files that were actually copied
+     * @param sourceDirectory
+     *          a mapping from a source of files to the {@link Directory} they
+     *          were copied into
+     */
+    public void revisionReady(String version, Map<String,List<RevisionFile>> revisionFiles, 
+        Map<String,List<String>> copiedFiles, Map<String, Directory> sourceDirectory) throws IOException;
+  }
+  
+  /**
+   * Resolves a session and source into a {@link Directory} to use for copying
+   * the session files to.
+   */
+  public static interface SourceDirectoryFactory {
+    
+    /**
+     * Called to denote that the replication actions for this session were
+     * finished and the directory is no longer needed.
+     */
+    public void cleanupSession(String sessionID) throws IOException;
+    
+    /**
+     * Returns the {@link Directory} to use for the given session and source.
+     * Implementations may e.g. return different directories for different
+     * sessions, or the same directory for all sessions. In that case, it is
+     * advised to clean the directory before it is used for a new session.
+     * 
+     * @see #cleanupSession(String)
+     */
+    public Directory getDirectory(String sessionID, String source) throws IOException;
+    
+  }
+  
+  /** The component name to use with {@link InfoStream#isEnabled(String)}. */
+  public static final String INFO_STREAM_COMPONENT = "ReplicationThread";
+  
+  private final Replicator replicator;
+  private final ReplicationHandler handler;
+  private final SourceDirectoryFactory factory;
+  private final byte[] copyBuffer = new byte[16384];
+  private final Lock updateLock = new ReentrantLock();
+  
+  private volatile ReplicationThread updateThread;
+  private volatile boolean closed = false;
+  private volatile InfoStream infoStream = InfoStream.getDefault();
+  
+  /**
+   * Constructor.
+   * 
+   * @param replicator the {@link Replicator} used for checking for updates
+   * @param handler notified when new revisions are ready
+   * @param factory returns a {@link Directory} for a given source and session 
+   */
+  public ReplicationClient(Replicator replicator, ReplicationHandler handler, SourceDirectoryFactory factory) {
+    this.replicator = replicator;
+    this.handler = handler;
+    this.factory = factory;
+  }
+  
+  private void copyBytes(IndexOutput out, InputStream in) throws IOException {
+    int numBytes;
+    while ((numBytes = in.read(copyBuffer)) > 0) {
+      out.writeBytes(copyBuffer, 0, numBytes);
+    }
+  }
+  
+  private void doUpdate() throws IOException {
+    SessionToken session = null;
+    final Map<String,Directory> sourceDirectory = new HashMap<String,Directory>();
+    final Map<String,List<String>> copiedFiles = new HashMap<String,List<String>>();
+    boolean notify = false;
+    try {
+      final String version = handler.currentVersion();
+      session = replicator.checkForUpdate(version);
+      if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+        infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): handlerVersion=" + version + " session=" + session);
+      }
+      if (session == null) {
+        // already up to date
+        return;
+      }
+      Map<String,List<RevisionFile>> requiredFiles = requiredFiles(session.sourceFiles);
+      if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+        infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): requiredFiles=" + requiredFiles);
+      }
+      for (Entry<String,List<RevisionFile>> e : requiredFiles.entrySet()) {
+        String source = e.getKey();
+        Directory dir = factory.getDirectory(session.id, source);
+        sourceDirectory.put(source, dir);
+        List<String> cpFiles = new ArrayList<String>();
+        copiedFiles.put(source, cpFiles);
+        for (RevisionFile file : e.getValue()) {
+          if (closed) {
+            // if we're closed, abort file copy
+            if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+              infoStream.message(INFO_STREAM_COMPONENT, "doUpdate(): detected client was closed); abort file copy");
+            }
+            return;
+          }
+          InputStream in = null;
+          IndexOutput out = null;
+          try {
+            in = replicator.obtainFile(session.id, source, file.fileName);
+            out = dir.createOutput(file.fileName, IOContext.DEFAULT);
+            copyBytes(out, in);
+            cpFiles.add(file.fileName);
+            // TODO add some validation, on size / checksum
+          } finally {
+            IOUtils.close(in, out);
+          }
+        }
+      }
+      // only notify if all required files were successfully obtained.
+      notify = true;
+    } finally {
+      if (session != null) {
+        try {
+          replicator.release(session.id);
+        } finally {
+          if (!notify) { // cleanup after ourselves
+            IOUtils.close(sourceDirectory.values());
+            factory.cleanupSession(session.id);
+          }
+        }
+      }
+    }
+    
+    // notify outside the try-finally above, so the session is released sooner.
+    // the handler may take time to finish acting on the copied files, but the
+    // session itself is no longer needed.
+    try {
+      if (notify && !closed) { // no point notifying if we are already closed
+        handler.revisionReady(session.version, session.sourceFiles, copiedFiles, sourceDirectory);
+      }
+    } finally {
+      IOUtils.close(sourceDirectory.values());
+      if (session != null) {
+        factory.cleanupSession(session.id);
+      }
+    }
+  }
+  
+  /** Throws {@link AlreadyClosedException} if the client has already been closed. */
+  protected final void ensureOpen() {
+    if (closed) {
+      throw new AlreadyClosedException("this update client has already been closed");
+    }
+  }
+  
+  /**
+   * Called when an exception is hit by the replication thread. The default
+   * implementation prints the full stacktrace to the {@link InfoStream} set in
+   * {@link #setInfoStream(InfoStream)}, or the {@link InfoStream#getDefault()
+   * default} one. You can override this method to log the exception elsewhere.
+   * <p>
+   * <b>NOTE:</b> if you override this method to throw the exception further,
+   * the replication thread will be terminated. The only way to restart it is to
+   * call {@link #stopUpdateThread()} followed by
+   * {@link #startUpdateThread(long, String)}.
+   */
+  protected void handleUpdateException(Throwable t) {
+    final StringWriter sw = new StringWriter();
+    t.printStackTrace(new PrintWriter(sw));
+    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
+      infoStream.message(INFO_STREAM_COMPONENT, "an error occurred during revision update: " + sw.toString());
+    }
+  }
+  
+  /**
+   * Returns the files required for replication. By default, this method returns
+   * all files that exist in the new revision, but not in the handler.
+   */
+  protected Map<String,List<RevisionFile>> requiredFiles(Map<String,List<RevisionFile>> newRevisionFiles) {
+    Map<String,List<RevisionFile>> handlerRevisionFiles = handler.currentRevisionFiles();
+    if (handlerRevisionFiles == null) {
+      return newRevisionFiles;
+    }
+    
+    Map<String,List<RevisionFile>> requiredFiles = new HashMap<String,List<RevisionFile>>();
+    for (Entry<String,List<RevisionFile>> e : handlerRevisionFiles.entrySet()) {
+      // put the handler files in a Set, for faster contains() checks later
+      Set<String> handlerFiles = new HashSet<String>();
+      for (RevisionFile file : e.getValue()) {
+        handlerFiles.add(file.fileName);
+      }
+      
+      // make sure to preserve revisionFiles order
+      ArrayList<RevisionFile> res = new ArrayList<RevisionFile>();
+      String source = e.getKey();
+      assert newRevisionFiles.containsKey(source) : "source not found in newRevisionFiles: " + newRevisionFiles;
+      for (RevisionFile file : newRevisionFiles.get(source)) {
+        if (!handlerFiles.contains(file.fileName)) {
+          res.add(file);
+        }
+      }
+      requiredFiles.put(source, res);
+    }
+    
+    return requiredFiles;
+  }
+  
+  @Override
+  public synchronized void close() {
+    if (!closed) {
+      stopUpdateThread();
+      closed = true;
+    }
+  }
+  
+  /**
+   * Start the update thread with the specified interval in milliseconds. For
+   * debugging purposes, you can optionally pass a name to be set on the thread
+   * via {@link Thread#setName(String)}. If you pass {@code null}, a default name
+   * will be set.
+   * 
+   * @throws IllegalStateException if the thread has already been started
+   */
+  public synchronized void startUpdateThread(long intervalMillis, String threadName) {
+    ensureOpen();
+    if (updateThread != null && updateThread.isAlive()) {
+      throw new IllegalStateException(
+          "cannot start an update thread when one is running, must first call 'stopUpdateThread()'");
+    }
+    threadName = threadName == null ? INFO_STREAM_COMPONENT : "ReplicationThread-" + threadName;
+    updateThread = new ReplicationThread(intervalMillis);
+    updateThread.setName(threadName);
+    updateThread.start();
+    // we rely on isAlive to return true in isUpdateThreadAlive, assert to be on the safe side
+    assert updateThread.isAlive() : "updateThread started but not alive?";
+  }
+  
+  /**
+   * Stop the update thread. If the update thread is not running, silently does
+   * nothing. This method returns after the update thread has stopped.
+   */
+  public synchronized void stopUpdateThread() {
+    if (updateThread != null) {
+      // this will trigger the thread to terminate if it awaits the lock.
+      // otherwise, if it's in the middle of replication, we wait for it to
+      // stop.
+      updateThread.stop.countDown();
+      try {
+        updateThread.join();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new ThreadInterruptedException(e);
+      }
+      updateThread = null;
+    }
+  }
+  
+  /**
+   * Returns true if the update thread is alive. The update thread is alive if
+   * it has been {@link #startUpdateThread(long, String) started} and not
+   * {@link #stopUpdateThread() stopped}, and has not hit an error which
+   * caused it to terminate (i.e. {@link #handleUpdateException(Throwable)}
+   * threw the exception further).
+   */
+  public synchronized boolean isUpdateThreadAlive() {
+    return updateThread != null && updateThread.isAlive();
+  }
+  
+  @Override
+  public String toString() {
+    String res = "ReplicationClient";
+    if (updateThread != null) {
+      res += " (" + updateThread.getName() + ")";
+    }
+    return res;
+  }
+  
+  /**
+   * Executes the update operation immediately, regardless of whether an update
+   * thread is running.
+   */
+  public void updateNow() throws IOException {
+    ensureOpen();
+    updateLock.lock();
+    try {
+      doUpdate();
+    } finally {
+      updateLock.unlock();
+    }
+  }
+
+  /** Sets the {@link InfoStream} to use for logging messages. */
+  public void setInfoStream(InfoStream infoStream) {
+    if (infoStream == null) {
+      infoStream = InfoStream.NO_OUTPUT;
+    }
+    this.infoStream = infoStream;
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java
new file mode 100755
index 0000000..3e4d4e5
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/Replicator.java
@@ -0,0 +1,80 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * An interface for replicating files. Allows a producer to
+ * {@link #publish(Revision) publish} {@link Revision}s and consumers to
+ * {@link #checkForUpdate(String) check for updates}. When a client needs to be
+ * updated, it is given a {@link SessionToken} through which it can
+ * {@link #obtainFile(String, String, String) obtain} the files of that
+ * revision. After the client has finished obtaining all the files, it should
+ * {@link #release(String) release} the given session, so that the files can be
+ * reclaimed if they are not needed anymore.
+ * <p>
+ * A client is always updated to the newest revision available. That is, if a
+ * client is on revision <em>r1</em> and revisions <em>r2</em> and <em>r3</em>
+ * were published, then the next time the client checks for updates, it will
+ * receive <em>r3</em>.
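+ * <p>
+ * A typical consumer loop, sketched (file copying elided):
+ * <pre>
+ * SessionToken session = replicator.checkForUpdate(currentVersion);
+ * if (session != null) { // null means the client is already up-to-date
+ *   try {
+ *     // obtain and copy this revision's files via obtainFile(...)
+ *   } finally {
+ *     replicator.release(session.id);
+ *   }
+ * }
+ * </pre>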
+ * 
+ * @lucene.experimental
+ */
+public interface Replicator extends Closeable {
+  
+  /**
+   * Publish a new {@link Revision} for consumption by clients. It is the
+   * caller's responsibility to verify that the revision files exist and can be
+   * read by clients. When the revision is no longer needed, it will be
+   * {@link Revision#release() released} by the replicator.
+   */
+  public void publish(Revision revision) throws IOException;
+  
+  /**
+   * Checks whether the given version is up-to-date. If it is not, returns a
+   * {@link SessionToken} which can be used for fetching the revision files;
+   * otherwise returns {@code null}.
+   * <p>
+   * <b>NOTE:</b> when the returned session token is no longer needed, you
+   * should call {@link #release(String)} so that the session resources can be
+   * reclaimed, including the revision files.
+   */
+  public SessionToken checkForUpdate(String currVersion) throws IOException;
+  
+  /**
+   * Notify that the specified {@link SessionToken} is no longer needed by the
+   * caller.
+   */
+  public void release(String sessionID) throws IOException;
+  
+  /**
+   * Returns an {@link InputStream} for the requested file and source in the
+   * context of the given {@link SessionToken#id session}.
+   * <p>
+   * <b>NOTE:</b> it is the caller's responsibility to close the returned
+   * stream.
+   * 
+   * @throws SessionExpiredException if the specified session has already
+   *         expired
+   */
+  public InputStream obtainFile(String sessionID, String source, String fileName) throws IOException;
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java
new file mode 100755
index 0000000..9df22fb
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/Revision.java
@@ -0,0 +1,75 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Map;
+
+
+/**
+ * A revision comprises lists of files that come from different sources and need
+ * to be replicated together to e.g. guarantee that all resources are in sync.
+ * In most cases an application will replicate a single index, and so the
+ * revision will contain files from a single source. However, some applications
+ * may need to treat a collection of indexes as a single entity, so that the
+ * files from all sources are replicated together, to guarantee consistency
+ * between them. For example, an application which indexes facets will need to
+ * replicate both the search and taxonomy indexes together, to guarantee that
+ * they match at the client side.
+ * 
+ * @lucene.experimental
+ */
+public interface Revision extends Comparable<Revision> {
+  
+  /**
+   * Compares the revision to the given version string. Behaves like
+   * {@link Comparable#compareTo(Object)}.
+   */
+  public int compareTo(String version);
+  
+  /**
+   * Returns a string representation of the version of this revision. The
+   * version is used by {@link #compareTo(String)} as well as to
+   * serialize/deserialize revision information. Therefore it must be
+   * self-descriptive as well as sufficient to distinguish one revision from another.
+   */
+  public String getVersion();
+  
+  /**
+   * Returns the files that comprise this revision, as a mapping from a source
+   * to a list of files.
+   */
+  public Map<String,List<RevisionFile>> getSourceFiles();
+  
+  /**
+   * Returns an {@link InputStream} for the given fileName and source. It is the
+   * caller's responsibility to close the stream when it has been
+   * consumed.
+   */
+  public InputStream open(String source, String fileName) throws IOException;
+  
+  /**
+   * Called when this revision can be safely released, i.e. when there are no
+   * more references to it.
+   */
+  public void release() throws IOException;
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java b/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java
new file mode 100755
index 0000000..3fc8cff
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/RevisionFile.java
@@ -0,0 +1,59 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Describes a file in a {@link Revision}. A file has a source, which allows a
+ * single revision to contain files from multiple sources (e.g. multiple
+ * indexes).
+ * 
+ * @lucene.experimental
+ */
+public class RevisionFile {
+  
+  /** The name of the file. */
+  public final String fileName;
+  
+  /** The size of the file denoted by {@link #fileName}. */
+  public long size = -1;
+  
+  /** Constructor with the given file name. */
+  public RevisionFile(String fileName) {
+    if (fileName == null || fileName.isEmpty()) {
+      throw new IllegalArgumentException("fileName cannot be null or empty");
+    }
+    this.fileName = fileName;
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof RevisionFile)) {
+      return false;
+    }
+    RevisionFile other = (RevisionFile) obj;
+    return fileName.equals(other.fileName) && size == other.size;
+  }
+  
+  @Override
+  public int hashCode() {
+    return fileName.hashCode() ^ (int) (size ^ (size >>> 32));
+  }
+  
+  @Override
+  public String toString() {
+    return "fileName=" + fileName + " size=" + size;
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java
new file mode 100755
index 0000000..4b697c3
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionExpiredException.java
@@ -0,0 +1,54 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+/**
+ * Exception indicating that a revision update session was expired due to lack
+ * of activity.
+ * 
+ * @see LocalReplicator#DEFAULT_SESSION_EXPIRATION_THRESHOLD
+ * @see LocalReplicator#setExpirationThreshold(long)
+ * 
+ * @lucene.experimental
+ */
+public class SessionExpiredException extends IOException {
+  
+  /**
+   * @see IOException#IOException(String, Throwable)
+   */
+  public SessionExpiredException(String message, Throwable cause) {
+    super(message, cause);
+  }
+  
+  /**
+   * @see IOException#IOException(String)
+   */
+  public SessionExpiredException(String message) {
+    super(message);
+  }
+  
+  /**
+   * @see IOException#IOException(Throwable)
+   */
+  public SessionExpiredException(Throwable cause) {
+    super(cause);
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java
new file mode 100755
index 0000000..90b6e41
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java
@@ -0,0 +1,108 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Token for a replication session, for guaranteeing that source replicated
+ * files will be kept safe until the replication completes.
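+ * <p>
+ * A serialization round-trip sketch (the streams are illustrative):
+ * <pre>
+ * ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+ * token.serialize(new DataOutputStream(bytes));
+ * SessionToken copy = new SessionToken(
+ *     new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
+ * </pre>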
+ * 
+ * @see Replicator#checkForUpdate(String)
+ * @see Replicator#release(String)
+ * @see LocalReplicator#DEFAULT_SESSION_EXPIRATION_THRESHOLD
+ * 
+ * @lucene.experimental
+ */
+public final class SessionToken {
+  
+  /**
+   * ID of this session.
+   * Should be passed when releasing the session, thereby notifying the
+   * {@link Replicator} that this session is no longer in use.
+   * @see Replicator#release(String)
+   */
+  public final String id;
+  
+  /**
+   * @see Revision#getVersion()
+   */
+  public final String version;
+  
+  /**
+   * @see Revision#getSourceFiles()
+   */
+  public final Map<String,List<RevisionFile>> sourceFiles;
+  
+  /** Constructor which deserializes from the given {@link DataInput}. */
+  public SessionToken(DataInput in) throws IOException {
+    this.id = in.readUTF();
+    this.version = in.readUTF();
+    this.sourceFiles = new HashMap<String,List<RevisionFile>>();
+    int numSources = in.readInt();
+    while (numSources > 0) {
+      String source = in.readUTF();
+      int numFiles = in.readInt();
+      List<RevisionFile> files = new ArrayList<RevisionFile>(numFiles);
+      for (int i = 0; i < numFiles; i++) {
+        String fileName = in.readUTF();
+        RevisionFile file = new RevisionFile(fileName);
+        file.size = in.readLong();
+        files.add(file);
+      }
+      this.sourceFiles.put(source, files);
+      --numSources;
+    }
+  }
+  
+  /** Constructor with the given id and revision. */
+  public SessionToken(String id, Revision revision) {
+    this.id = id;
+    this.version = revision.getVersion();
+    this.sourceFiles = revision.getSourceFiles();
+  }
+  
+  /** Serialize the token data for communication between server and client. */
+  public void serialize(DataOutput out) throws IOException {
+    out.writeUTF(id);
+    out.writeUTF(version);
+    out.writeInt(sourceFiles.size());
+    for (Entry<String,List<RevisionFile>> e : sourceFiles.entrySet()) {
+      out.writeUTF(e.getKey());
+      List<RevisionFile> files = e.getValue();
+      out.writeInt(files.size());
+      for (RevisionFile file : files) {
+        out.writeUTF(file.fileName);
+        out.writeLong(file.size);
+      }
+    }
+  }
+  
+  @Override
+  public String toString() {
+    return "id=" + id + " version=" + version + " files=" + sourceFiles;
+  }
+  
+}
\ No newline at end of file
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java
new file mode 100755
index 0000000..ebe75ad
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpClientBase.java
@@ -0,0 +1,297 @@
+package org.apache.lucene.replicator.http;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.util.concurrent.Callable;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.StatusLine;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.conn.ClientConnectionManager;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.params.HttpConnectionParams;
+import org.apache.http.util.EntityUtils;
+import org.apache.lucene.store.AlreadyClosedException;
+
+/**
+ * Base class for Http clients.
+ * 
+ * @lucene.experimental
+ * */
+public abstract class HttpClientBase implements Closeable {
+  
+  /**
+   * Default connection timeout for this client, in milliseconds.
+   * 
+   * @see #setConnectionTimeout(int)
+   */
+  public static final int DEFAULT_CONNECTION_TIMEOUT = 1000;
+  
+  /**
+   * Default socket timeout for this client, in milliseconds.
+   * 
+   * @see #setSoTimeout(int)
+   */
+  public static final int DEFAULT_SO_TIMEOUT = 60000;
+  
+  // TODO compression?
+  
+  /** The URL string to execute requests against. */
+  protected final String url;
+  
+  private volatile boolean closed = false;
+  
+  private final HttpClient httpc;
+  
+  /**
+   * @param conMgr connection manager to use for this http client.
+   *        <b>NOTE:</b> The provided {@link ClientConnectionManager} will not be
+   *        {@link ClientConnectionManager#shutdown() shut down} by this class.
+   */
+  protected HttpClientBase(String host, int port, String path, ClientConnectionManager conMgr) {
+    url = normalizedURL(host, port, path);
+    httpc = new DefaultHttpClient(conMgr);
+    setConnectionTimeout(DEFAULT_CONNECTION_TIMEOUT);
+    setSoTimeout(DEFAULT_SO_TIMEOUT);
+  }
+  
+  /**
+   * Set the connection timeout for this client, in milliseconds. This setting
+   * is used to modify {@link HttpConnectionParams#setConnectionTimeout}.
+   * 
+   * @param timeout timeout to set, in milliseconds
+   */
+  public void setConnectionTimeout(int timeout) {
+    HttpConnectionParams.setConnectionTimeout(httpc.getParams(), timeout);
+  }
+  
+  /**
+   * Set the socket timeout for this client, in milliseconds. This setting
+   * is used to modify {@link HttpConnectionParams#setSoTimeout}.
+   * 
+   * @param timeout timeout to set, in milliseconds
+   */
+  public void setSoTimeout(int timeout) {
+    HttpConnectionParams.setSoTimeout(httpc.getParams(), timeout);
+  }
+  
+  /** Throws {@link AlreadyClosedException} if this client is already closed. */
+  protected final void ensureOpen() throws AlreadyClosedException {
+    if (closed) {
+      throw new AlreadyClosedException("HttpClient already closed");
+    }
+  }
+  
+  /**
+   * Creates a URL out of the given parameters, translating an empty/null path to '/'.
+   */
+  private static String normalizedURL(String host, int port, String path) {
+    if (path == null || path.length() == 0) {
+      path = "/";
+    }
+    return "http://" + host + ":" + port + path;
+  }
+  
+  /**
+   * <b>Internal:</b> verifies the response status after invocation; in case of
+   * error, attempts to read the exception sent by the server.
+   */
+  protected void verifyStatus(HttpResponse response) throws IOException {
+    StatusLine statusLine = response.getStatusLine();
+    if (statusLine.getStatusCode() != HttpStatus.SC_OK) {
+      throwKnownError(response, statusLine); 
+    }
+  }
+  
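+  /**
+   * Attempts to deserialize the {@link Throwable} sent by the server and throw
+   * it; falls back to a {@link RuntimeException} describing the given status
+   * line when the response does not contain a serialized exception.
+   */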
+  protected void throwKnownError(HttpResponse response, StatusLine statusLine) throws IOException {
+    ObjectInputStream in = null;
+    try {
+      in = new ObjectInputStream(response.getEntity().getContent());
+    } catch (Exception e) {
+      // the response stream is not an exception - could be an error in servlet.init().
+      throw new RuntimeException("Unknown error: " + statusLine, e);
+    }
+    
+    Throwable t;
+    try {
+      t = (Throwable) in.readObject();
+    } catch (Exception e) { 
+      // not likely
+      throw new RuntimeException("Failed to read exception object: " + statusLine, e);
+    } finally {
+      in.close();
+    }
+    if (t instanceof IOException) {
+      throw (IOException) t;
+    }
+    if (t instanceof RuntimeException) {
+      throw (RuntimeException) t;
+    }
+    throw new RuntimeException("Unknown exception: " + statusLine, t);
+  }
+  
+  /**
+   * <b>Internal:</b> executes a request and returns its result.
+   * The <code>params</code> argument is treated as: name1,value1,name2,value2,...
+   */
+  protected HttpResponse executePOST(String request, HttpEntity entity, String... params) throws IOException {
+    ensureOpen();
+    HttpPost m = new HttpPost(queryString(request, params));
+    m.setEntity(entity);
+    HttpResponse response = httpc.execute(m);
+    verifyStatus(response);
+    return response;
+  }
+  
+  /**
+   * <b>Internal:</b> executes a request and returns its result.
+   * The <code>params</code> argument is treated as: name1,value1,name2,value2,...
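+   * For example, <code>executeGET("update", "version", "17")</code> issues
+   * <code>GET &lt;url&gt;/update?version=17</code> (the values here are
+   * hypothetical).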
+   */
+  protected HttpResponse executeGET(String request, String... params) throws IOException {
+    ensureOpen();
+    HttpGet m = new HttpGet(queryString(request, params));
+    HttpResponse response = httpc.execute(m);
+    verifyStatus(response);
+    return response;
+  }
+  
+  private String queryString(String request, String... params) throws UnsupportedEncodingException {
+    StringBuilder query = new StringBuilder(url).append('/').append(request).append('?');
+    if (params != null) {
+      for (int i = 0; i < params.length; i += 2) {
+        query.append(params[i]).append('=').append(URLEncoder.encode(params[i+1], "UTF-8")).append('&');
+      }
+    }
+    return query.substring(0, query.length() - 1);
+  }
+  
+  /** Internal utility: returns the input stream of the provided response. */
+  public InputStream responseInputStream(HttpResponse response) throws IOException {
+    return responseInputStream(response, false);
+  }
+  
+  // TODO: can we simplify this consuming logic?
+  /**
+   * Internal utility: returns the input stream of the provided response, which optionally 
+   * consumes the response's resources when the input stream is exhausted.
+   */
+  public InputStream responseInputStream(HttpResponse response, boolean consume) throws IOException {
+    final HttpEntity entity = response.getEntity();
+    final InputStream in = entity.getContent();
+    if (!consume) {
+      return in;
+    }
+    return new InputStream() {
+      private boolean consumed = false;
+      @Override
+      public int read() throws IOException {
+        final int res = in.read();
+        consume(res);
+        return res;
+      }
+      @Override
+      public void close() throws IOException {
+        super.close();
+        consume(-1);
+      }
+      @Override
+      public int read(byte[] b) throws IOException {
+        final int res = super.read(b);
+        consume(res);
+        return res;
+      }
+      @Override
+      public int read(byte[] b, int off, int len) throws IOException {
+        final int res = super.read(b, off, len);
+        consume(res);
+        return res;
+      }
+      private void consume(int minusOne) {
+        if (!consumed && minusOne==-1) {
+          try {
+            EntityUtils.consume(entity);
+          } catch (Exception e) {
+            // ignored on purpose
+          }
+          consumed = true;
+        }
+      }
+    };
+  }
+  
+  /**
+   * Returns true iff this instance was {@link #close() closed}, otherwise
+   * returns false. Note that if you override {@link #close()}, you must call
+   * {@code super.close()}, in order for this instance to be properly closed.
+   */
+  protected final boolean isClosed() {
+    return closed;
+  }
+  
+  /**
+   * Same as {@link #doAction(HttpResponse, boolean, Callable)}, but always consumes the response at the end.
+   */
+  protected <T> T doAction(HttpResponse response, Callable<T> call) throws IOException {
+    return doAction(response, true, call);
+  }
+  
+  /**
+   * Executes the given action and then verifies that the response status is
+   * still OK; if not, attempts to extract the actual server-side exception.
+   * Optionally consumes the response's resources at exit, depending on the
+   * <code>consume</code> parameter.
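+   * <p>
+   * A usage sketch (the request name here is hypothetical; the pattern mirrors
+   * {@link HttpReplicator}):
+   * <pre class="prettyprint lang-java">
+   * final HttpResponse response = executeGET("update");
+   * SessionToken token = doAction(response, new Callable&lt;SessionToken&gt;() {
+   *   &#64;Override
+   *   public SessionToken call() throws Exception {
+   *     return new SessionToken(new DataInputStream(responseInputStream(response)));
+   *   }
+   * });
+   * </pre>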
+   */
+  protected <T> T doAction(HttpResponse response, boolean consume, Callable<T> call) throws IOException {
+    IOException error = null;
+    try {
+      return call.call();
+    } catch (IOException e) {
+      error = e;
+    } catch (Exception e) {
+      error = new IOException(e);
+    } finally {
+      try {
+        verifyStatus(response);
+      } finally {
+        if (consume) {
+          try {
+            EntityUtils.consume(response.getEntity());
+          } catch (Exception e) {
+            // ignoring on purpose
+          }
+        }
+      }
+    }
+    throw error; // reached only if the action failed although the response status was OK
+  }
+  
+  @Override
+  public void close() throws IOException {
+    closed = true;
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java
new file mode 100755
index 0000000..7df9697
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/HttpReplicator.java
@@ -0,0 +1,105 @@
+package org.apache.lucene.replicator.http;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.Callable;
+
+import org.apache.http.HttpResponse;
+import org.apache.http.conn.ClientConnectionManager;
+import org.apache.lucene.replicator.Replicator;
+import org.apache.lucene.replicator.Revision;
+import org.apache.lucene.replicator.SessionToken;
+import org.apache.lucene.replicator.http.ReplicationService.ReplicationAction;
+
+/**
+ * An HTTP implementation of {@link Replicator}. Assumes the API supported by
+ * {@link ReplicationService}.
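+ * <p>
+ * A construction sketch; the host, port and connection manager below are
+ * example choices, not requirements of the API:
+ * <pre class="prettyprint lang-java">
+ * ClientConnectionManager conMgr = new PoolingClientConnectionManager();
+ * Replicator replicator = new HttpReplicator("replicationHost", 8080,
+ *     ReplicationService.REPLICATION_CONTEXT + "/s1", conMgr);
+ * </pre>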
+ * 
+ * @lucene.experimental
+ */
+public class HttpReplicator extends HttpClientBase implements Replicator {
+  
+  /** Construct with specified connection manager. */
+  public HttpReplicator(String host, int port, String path, ClientConnectionManager conMgr) {
+    super(host, port, path, conMgr);
+  }
+  
+  @Override
+  public SessionToken checkForUpdate(String currVersion) throws IOException {
+    String[] params = null;
+    if (currVersion != null) {
+      params = new String[] { ReplicationService.REPLICATE_VERSION_PARAM, currVersion };
+    }
+    final HttpResponse response = executeGET(ReplicationAction.UPDATE.name(), params);
+    return doAction(response, new Callable<SessionToken>() {
+      @Override
+      public SessionToken call() throws Exception {
+        final DataInputStream dis = new DataInputStream(responseInputStream(response));
+        try {
+          if (dis.readByte() == 0) {
+            return null;
+          } else {
+            return new SessionToken(dis);
+          }
+        } finally {
+          dis.close();
+        }
+      }
+    });
+  }
+  
+  @Override
+  public InputStream obtainFile(String sessionID, String source, String fileName) throws IOException {
+    String[] params = new String[] {
+        ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID,
+        ReplicationService.REPLICATE_SOURCE_PARAM, source,
+        ReplicationService.REPLICATE_FILENAME_PARAM, fileName,
+    };
+    final HttpResponse response = executeGET(ReplicationAction.OBTAIN.name(), params);
+    return doAction(response, false, new Callable<InputStream>() {
+      @Override
+      public InputStream call() throws Exception {
+        return responseInputStream(response, true);
+      }
+    });
+  }
+  
+  @Override
+  public void publish(Revision revision) throws IOException {
+    throw new UnsupportedOperationException(
+        "this replicator implementation does not support remote publishing of revisions");
+  }
+  
+  @Override
+  public void release(String sessionID) throws IOException {
+    String[] params = new String[] {
+        ReplicationService.REPLICATE_SESSION_ID_PARAM, sessionID
+    };
+    final HttpResponse response = executeGET(ReplicationAction.RELEASE.name(), params);
+    doAction(response, new Callable<Object>() {
+      @Override
+      public Object call() throws Exception {
+        return null; // do not remove this call: doAction() still validates the response status for us
+      }
+    });
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java
new file mode 100755
index 0000000..4f62490
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java
@@ -0,0 +1,198 @@
+package org.apache.lucene.replicator.http;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectOutputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Locale;
+import java.util.Map;
+import java.util.StringTokenizer;
+
+import javax.servlet.ServletException;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpStatus;
+import org.apache.lucene.replicator.Replicator;
+import org.apache.lucene.replicator.SessionToken;
+
+/**
+ * A server-side service for handling replication requests. The service assumes
+ * requests are sent in the format
+ * <code>/&lt;context&gt;/&lt;shard&gt;/&lt;action&gt;</code> where
+ * <ul>
+ * <li>{@code context} is the servlet context, e.g. {@link #REPLICATION_CONTEXT}
+ * <li>{@code shard} is the ID of the shard, e.g. "s1"
+ * <li>{@code action} is one of {@link ReplicationAction} values
+ * </ul>
+ * For example, to check whether there are revision updates for shard "s1" you
+ * should send the request: <code>http://host:port/replicate/s1/update</code>.
+ * <p>
+ * This service is written like a servlet:
+ * {@link #perform(HttpServletRequest, HttpServletResponse)} takes a servlet
+ * request and response, so it is easy to embed in your application's own
+ * servlet.
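+ * <p>
+ * A minimal embedding sketch; the servlet class and the single-shard map below
+ * are examples, not part of this module:
+ * <pre class="prettyprint lang-java">
+ * public class ReplicationServlet extends HttpServlet {
+ *   private final ReplicationService service = new ReplicationService(
+ *       Collections.&lt;String,Replicator&gt;singletonMap("s1", new LocalReplicator()));
+ *   &#64;Override
+ *   protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ *       throws ServletException, IOException {
+ *     service.perform(req, resp);
+ *   }
+ * }
+ * </pre>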
+ * 
+ * @lucene.experimental
+ */
+public class ReplicationService {
+  
+  /** Actions supported by the {@link ReplicationService}. */
+  public enum ReplicationAction {
+    OBTAIN, RELEASE, UPDATE
+  }
+  
+  /** The context path for the servlet. */
+  public static final String REPLICATION_CONTEXT = "/replicate";
+  
+  /** Request parameter name for providing the revision version. */
+  public final static String REPLICATE_VERSION_PARAM = "version";
+  
+  /** Request parameter name for providing a session ID. */
+  public final static String REPLICATE_SESSION_ID_PARAM = "sessionid";
+  
+  /** Request parameter name for providing the file's source. */
+  public final static String REPLICATE_SOURCE_PARAM = "source";
+  
+  /** Request parameter name for providing the file's name. */
+  public final static String REPLICATE_FILENAME_PARAM = "filename";
+  
+  private static final int SHARD_IDX = 0, ACTION_IDX = 1;
+  
+  private final Map<String,Replicator> replicators;
+  
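+  /** Creates this service, mapping each shard ID to its {@link Replicator}. */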
+  public ReplicationService(Map<String,Replicator> replicators) {
+    this.replicators = replicators;
+  }
+  
+  /**
+   * Returns the path elements that were given in the servlet request, excluding
+   * the servlet's action context.
+   */
+  private String[] getPathElements(HttpServletRequest req) {
+    String path = req.getServletPath();
+    String pathInfo = req.getPathInfo();
+    if (pathInfo != null) {
+      path += pathInfo;
+    }
+    int actionLen = REPLICATION_CONTEXT.length();
+    int startIdx = actionLen;
+    if (path.length() > actionLen && path.charAt(actionLen) == '/') {
+      ++startIdx;
+    }
+    
+    // split the string on '/' and remove any empty elements. This is better
+    // than using String.split() since the latter may return empty elements in
+    // the array
+    StringTokenizer stok = new StringTokenizer(path.substring(startIdx), "/");
+    ArrayList<String> elements = new ArrayList<String>();
+    while (stok.hasMoreTokens()) {
+      elements.add(stok.nextToken());
+    }
+    return elements.toArray(new String[0]);
+  }
+  
+  private static String extractRequestParam(HttpServletRequest req, String paramName) throws ServletException {
+    String param = req.getParameter(paramName);
+    if (param == null) {
+      throw new ServletException("Missing mandatory parameter: " + paramName);
+    }
+    return param;
+  }
+  
+  private static void copy(InputStream in, OutputStream out) throws IOException {
+    byte[] buf = new byte[16384];
+    int numRead;
+    while ((numRead = in.read(buf)) != -1) {
+      out.write(buf, 0, numRead);
+    }
+  }
+  
+  /** Executes the replication task. */
+  public void perform(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+    String[] pathElements = getPathElements(req);
+    
+    if (pathElements.length != 2) {
+      throw new ServletException("invalid path, must contain shard ID and action, e.g. */s1/update");
+    }
+    
+    final ReplicationAction action;
+    try {
+      action = ReplicationAction.valueOf(pathElements[ACTION_IDX].toUpperCase(Locale.ENGLISH));
+    } catch (IllegalArgumentException e) {
+      throw new ServletException("Unsupported action provided: " + pathElements[ACTION_IDX]);
+    }
+    
+    final Replicator replicator = replicators.get(pathElements[SHARD_IDX]);
+    if (replicator == null) {
+      throw new ServletException("unrecognized shard ID " + pathElements[SHARD_IDX]);
+    }
+    
+    ServletOutputStream resOut = resp.getOutputStream();
+    try {
+      switch (action) {
+        case OBTAIN:
+          final String sessionID = extractRequestParam(req, REPLICATE_SESSION_ID_PARAM);
+          final String fileName = extractRequestParam(req, REPLICATE_FILENAME_PARAM);
+          final String source = extractRequestParam(req, REPLICATE_SOURCE_PARAM);
+          InputStream in = replicator.obtainFile(sessionID, source, fileName);
+          try {
+            copy(in, resOut);
+          } finally {
+            in.close();
+          }
+          break;
+        case RELEASE:
+          replicator.release(extractRequestParam(req, REPLICATE_SESSION_ID_PARAM));
+          break;
+        case UPDATE:
+          String currVersion = req.getParameter(REPLICATE_VERSION_PARAM);
+          SessionToken token = replicator.checkForUpdate(currVersion);
+          if (token == null) {
+            resOut.write(0); // marker for null token
+          } else {
+            resOut.write(1); // marker for non-null token
+            token.serialize(new DataOutputStream(resOut));
+          }
+          break;
+      }
+    } catch (Exception e) {
+      resp.setStatus(HttpStatus.SC_INTERNAL_SERVER_ERROR); // propagate the failure
+      try {
+        /*
+         * Note: it is assumed that "identified exceptions" are thrown before
+         * anything was written to the stream.
+         */
+        ObjectOutputStream oos = new ObjectOutputStream(resOut);
+        oos.writeObject(e);
+        oos.flush();
+      } catch (Exception e2) {
+        throw new IOException("Could not serialize", e2);
+      }
+    } finally {
+      resp.flushBuffer();
+    }
+  }
+  
+}
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/package.html b/lucene/replicator/src/java/org/apache/lucene/replicator/http/package.html
new file mode 100755
index 0000000..fce050b
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/package.html
@@ -0,0 +1,28 @@
+<html>
+
+<!-- 
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<head>
+<title>HTTP replication implementation</title>
+</head>
+
+<body>
+<h1>HTTP replication implementation</h1>
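+<p>
+HttpReplicator (the client) and ReplicationService (the server-side service)
+implement the replication protocol over HTTP.
+</p>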
+</body>
+
+</html>
\ No newline at end of file
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/package.html b/lucene/replicator/src/java/org/apache/lucene/replicator/package.html
new file mode 100755
index 0000000..63b6284
--- /dev/null
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/package.html
@@ -0,0 +1,79 @@
+<html>
+
+<!-- 
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<head>
+<title>Files replication framework</title>
+</head>
+
+<body>
+<h1>Files replication framework</h1>
+
+	The
+	<a href="Replicator.html">Replicator</a> allows replicating files between a server and client(s). Producers publish
+	<a href="Revision.html">revisions</a> and consumers update to the latest revision available.
+	<a href="ReplicationClient.html">ReplicationClient</a> is a helper utility for performing the update operation. It can
+	be invoked either
+	<a href="ReplicationClient.html#updateNow()">manually</a> or periodically by
+	<a href="ReplicationClient.html#startUpdateThread(long, java.lang.String)">starting an update thread</a>.
+	<a href="http/HttpReplicator.html">HttpReplicator</a> can be used to replicate revisions by consumers that reside on
+	a different node than the producer.
+
+	<p />
+	The replication framework supports replicating any type of file, with built-in support for a single search index as
+	well as an index and taxonomy pair. For a single index, the application should publish an
+	<a href="IndexRevision.html">IndexRevision</a> and set
+	<a href="IndexReplicationHandler.html">IndexReplicationHandler</a> on the client. For an index and taxonomy pair, the
+	application should publish an <a href="IndexAndTaxonomyRevision.html">IndexAndTaxonomyRevision</a> and set 
+	<a href="IndexAndTaxonomyReplicationHandler.html">IndexAndTaxonomyReplicationHandler</a> on the client.
+
+	<p />
+	When the replication client detects that there is a newer revision available, it copies the files of the revision and
+	then invokes the handler to complete the operation (e.g. copy the files to the index directory, fsync them, reopen an
+	index reader etc.). By default, only files that do not exist in the handler's
+	<a href="ReplicationClient.ReplicationHandler.html#currentRevisionFiles()">current revision files</a> are copied,
+	however this can be overridden by extending the client.
+
+	<p />
+	An example usage of the Replicator:
+	
+<pre class="prettyprint lang-java">
+// ++++++++++++++ SERVER SIDE ++++++++++++++ // 
+IndexWriter publishWriter; // the writer used for indexing
+Replicator replicator = new LocalReplicator();
+replicator.publish(new IndexRevision(publishWriter));
+
+// ++++++++++++++ CLIENT SIDE ++++++++++++++ // 
+// either LocalReplicator, or HttpReplicator if client and server are on different nodes
+Replicator replicator;
+
+// callback invoked after the handler finishes handling the revision; e.g. it can reopen the reader.
+Callable&lt;Boolean&gt; callback = null; // can also be null if no callback is needed
+ReplicationHandler handler = new IndexReplicationHandler(indexDir, callback);
+SourceDirectoryFactory factory = new PerSessionDirectoryFactory(workDir);
+ReplicationClient client = new ReplicationClient(replicator, handler, factory);
+
+// invoke client manually
+client.updateNow();
+
+// or, periodically
+client.startUpdateThread(100); // check for update every 100 milliseconds
+</pre>
+
+</body>
+</html>
\ No newline at end of file
diff --git a/lucene/replicator/src/java/overview.html b/lucene/replicator/src/java/overview.html
new file mode 100755
index 0000000..e20f482
--- /dev/null
+++ b/lucene/replicator/src/java/overview.html
@@ -0,0 +1,26 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+  <head>
+    <title>
+      replicator
+    </title>
+  </head>
+  <body>
+  Provides index files replication capabilities.
+  </body>
+</html>
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java
new file mode 100755
index 0000000..587f7c6
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyReplicationClientTest.java
@@ -0,0 +1,444 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.facet.index.FacetFields;
+import org.apache.lucene.facet.params.FacetIndexingParams;
+import org.apache.lucene.facet.params.FacetSearchParams;
+import org.apache.lucene.facet.search.CountFacetRequest;
+import org.apache.lucene.facet.search.DrillDownQuery;
+import org.apache.lucene.facet.search.FacetsCollector;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.replicator.IndexAndTaxonomyRevision.SnapshotDirectoryTaxonomyWriter;
+import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
+import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.ThreadInterruptedException;
+import org.apache.lucene.util._TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IndexAndTaxonomyReplicationClientTest extends ReplicatorTestCase {
+  
+  private static class IndexAndTaxonomyReadyCallback implements Callable<Boolean>, Closeable {
+    
+    private final Directory indexDir, taxoDir;
+    private DirectoryReader indexReader;
+    private DirectoryTaxonomyReader taxoReader;
+    private long lastIndexGeneration = -1;
+    
+    public IndexAndTaxonomyReadyCallback(Directory indexDir, Directory taxoDir) throws IOException {
+      this.indexDir = indexDir;
+      this.taxoDir = taxoDir;
+      if (DirectoryReader.indexExists(indexDir)) {
+        indexReader = DirectoryReader.open(indexDir);
+        lastIndexGeneration = indexReader.getIndexCommit().getGeneration();
+        taxoReader = new DirectoryTaxonomyReader(taxoDir);
+      }
+    }
+    
+    @Override
+    public Boolean call() throws Exception {
+      if (indexReader == null) {
+        indexReader = DirectoryReader.open(indexDir);
+        lastIndexGeneration = indexReader.getIndexCommit().getGeneration();
+        taxoReader = new DirectoryTaxonomyReader(taxoDir);
+      } else {
+        // verify search index
+        DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
+        assertNotNull("should not have reached here if no changes were made to the index", newReader);
+        long newGeneration = newReader.getIndexCommit().getGeneration();
+        assertTrue("expected newer generation; current=" + lastIndexGeneration + " new=" + newGeneration, newGeneration > lastIndexGeneration);
+        indexReader.close();
+        indexReader = newReader;
+        lastIndexGeneration = newGeneration;
+        _TestUtil.checkIndex(indexDir);
+        
+        // verify taxonomy index
+        DirectoryTaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(taxoReader);
+        if (newTaxoReader != null) {
+          taxoReader.close();
+          taxoReader = newTaxoReader;
+        }
+        _TestUtil.checkIndex(taxoDir);
+        
+        // verify faceted search
+        int id = Integer.parseInt(indexReader.getIndexCommit().getUserData().get(VERSION_ID), 16);
+        CategoryPath cp = new CategoryPath("A", Integer.toString(id, 16));
+        IndexSearcher searcher = new IndexSearcher(indexReader);
+        FacetsCollector fc = FacetsCollector.create(new FacetSearchParams(new CountFacetRequest(cp, 10)), indexReader, taxoReader);
+        searcher.search(new MatchAllDocsQuery(), fc);
+        assertEquals(1, (int) fc.getFacetResults().get(0).getFacetResultNode().value);
+        
+        DrillDownQuery drillDown = new DrillDownQuery(FacetIndexingParams.DEFAULT);
+        drillDown.add(cp);
+        TopDocs docs = searcher.search(drillDown, 10);
+        assertEquals(1, docs.totalHits);
+      }
+      return null;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      IOUtils.close(indexReader, taxoReader);
+    }
+  }
+  
+  private Directory publishIndexDir, publishTaxoDir;
+  private MockDirectoryWrapper handlerIndexDir, handlerTaxoDir;
+  private Replicator replicator;
+  private SourceDirectoryFactory sourceDirFactory;
+  private ReplicationClient client;
+  private ReplicationHandler handler;
+  private IndexWriter publishIndexWriter;
+  private SnapshotDirectoryTaxonomyWriter publishTaxoWriter;
+  private IndexAndTaxonomyReadyCallback callback;
+  private File clientWorkDir;
+  
+  private static final String VERSION_ID = "version";
+  
+  private void assertHandlerRevision(int expectedID, Directory dir) throws IOException {
+    // Loop as long as the client is alive; the test framework will terminate
+    // us if there's a serious bug, e.g. the client doesn't really update.
+    // Otherwise, introducing timeouts is not good: it can easily lead to
+    // false positives.
+    while (client.isUpdateThreadAlive()) {
+      // give client a chance to update
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+        throw new ThreadInterruptedException(e);
+      }
+      
+      try {
+        DirectoryReader reader = DirectoryReader.open(dir);
+        try {
+          int handlerID = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16);
+          if (expectedID == handlerID) {
+            return;
+          }
+        } finally {
+          reader.close();
+        }
+      } catch (Exception e) {
+        // we can hit IndexNotFoundException or e.g. EOFException (on
+        // segments_N) because it is being copied at the same time it is read by
+        // DirectoryReader.open().
+      }
+    }
+  }
+  
+  private Revision createRevision(final int id) throws IOException {
+    publishIndexWriter.addDocument(newDocument(publishTaxoWriter, id));
+    publishIndexWriter.setCommitData(new HashMap<String, String>() {{
+      put(VERSION_ID, Integer.toString(id, 16));
+    }});
+    publishIndexWriter.commit();
+    publishTaxoWriter.commit();
+    return new IndexAndTaxonomyRevision(publishIndexWriter, publishTaxoWriter);
+  }
+  
+  private Document newDocument(TaxonomyWriter taxoWriter, int id) throws IOException {
+    Document doc = new Document();
+    FacetFields facetFields = new FacetFields(taxoWriter);
+    facetFields.addFields(doc, Collections.singleton(new CategoryPath("A", Integer.toString(id, 16))));
+    return doc;
+  }
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    publishIndexDir = newDirectory();
+    publishTaxoDir = newDirectory();
+    handlerIndexDir = newMockDirectory();
+    handlerTaxoDir = newMockDirectory();
+    clientWorkDir = _TestUtil.getTempDir("replicationClientTest");
+    sourceDirFactory = new PerSessionDirectoryFactory(clientWorkDir);
+    replicator = new LocalReplicator();
+    callback = new IndexAndTaxonomyReadyCallback(handlerIndexDir, handlerTaxoDir);
+    handler = new IndexAndTaxonomyReplicationHandler(handlerIndexDir, handlerTaxoDir, callback);
+    client = new ReplicationClient(replicator, handler, sourceDirFactory);
+    
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    publishIndexWriter = new IndexWriter(publishIndexDir, conf);
+    publishTaxoWriter = new SnapshotDirectoryTaxonomyWriter(publishTaxoDir);
+  }
+  
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(client, callback, publishIndexWriter, publishTaxoWriter, replicator, publishIndexDir, publishTaxoDir,
+        handlerIndexDir, handlerTaxoDir);
+    super.tearDown();
+  }
+  
+  @Test
+  public void testNoUpdateThread() throws Exception {
+    assertNull("no version expected at start", handler.currentVersion());
+    
+    // Callback validates the replicated index
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    
+    // make sure updating twice, when in fact there's nothing to update, works
+    client.updateNow();
+    
+    replicator.publish(createRevision(2));
+    client.updateNow();
+    
+    // Publish two revisions without update, handler should be upgraded to latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    client.updateNow();
+  }
+  
+  @Test
+  public void testRestart() throws Exception {
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    
+    replicator.publish(createRevision(2));
+    client.updateNow();
+    
+    client.stopUpdateThread();
+    client.close();
+    client = new ReplicationClient(replicator, handler, sourceDirFactory);
+    
+    // Publish two revisions without update, handler should be upgraded to latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    client.updateNow();
+  }
+  
+  @Test
+  public void testUpdateThread() throws Exception {
+    client.startUpdateThread(10, "indexTaxo");
+    
+    replicator.publish(createRevision(1));
+    assertHandlerRevision(1, handlerIndexDir);
+    
+    replicator.publish(createRevision(2));
+    assertHandlerRevision(2, handlerIndexDir);
+    
+    // Publish two revisions without update, handler should be upgraded to latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    assertHandlerRevision(4, handlerIndexDir);
+  }
+  
+  @Test
+  public void testRecreateTaxonomy() throws Exception {
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    
+    // recreate index and taxonomy
+    Directory newTaxo = newDirectory();
+    new DirectoryTaxonomyWriter(newTaxo).close();
+    publishTaxoWriter.replaceTaxonomy(newTaxo);
+    publishIndexWriter.deleteAll();
+    replicator.publish(createRevision(2));
+    
+    client.updateNow();
+    newTaxo.close();
+  }
+
+  /*
+   * This test verifies that the client and handler do not end up in a corrupt
+   * index if exceptions are thrown at any point during replication. Either when
+   * a client copies files from the server to the temporary space, or when the
+   * handler copies them to the index directory.
+   */
+  @Test
+  public void testConsistencyOnExceptions() throws Exception {
+    // so the handler's index isn't empty
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    client.close();
+    callback.close();
+
+    // Replicator violates write-once policy. It may be that the
+    // handler copies files to the index dir, then fails to copy a
+    // file and reverts the copy operation. On the next attempt, it
+    // will copy the same file again. There is nothing wrong with this
+    // in a real system, but it does violate write-once, and MDW
+    // doesn't like it. Disabling it means that we won't catch cases
+    // where the handler overwrites an existing index file, but
+    // there's nothing currently we can do about it, unless we don't
+    // use MDW.
+    handlerIndexDir.setPreventDoubleWrite(false);
+    handlerTaxoDir.setPreventDoubleWrite(false);
+
+    // wrap sourceDirFactory to return a MockDirWrapper so we can simulate errors
+    final SourceDirectoryFactory in = sourceDirFactory;
+    final AtomicInteger failures = new AtomicInteger(atLeast(10));
+    sourceDirFactory = new SourceDirectoryFactory() {
+      
+      private long clientMaxSize = 100, handlerIndexMaxSize = 100, handlerTaxoMaxSize = 100;
+      private double clientExRate = 1.0, handlerIndexExRate = 1.0, handlerTaxoExRate = 1.0;
+      
+      @Override
+      public void cleanupSession(String sessionID) throws IOException {
+        in.cleanupSession(sessionID);
+      }
+      
+      @SuppressWarnings("synthetic-access")
+      @Override
+      public Directory getDirectory(String sessionID, String source) throws IOException {
+        Directory dir = in.getDirectory(sessionID, source);
+        if (random().nextBoolean() && failures.get() > 0) { // client should fail, return wrapped dir
+          MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir);
+          mdw.setRandomIOExceptionRateOnOpen(clientExRate);
+          mdw.setMaxSizeInBytes(clientMaxSize);
+          mdw.setRandomIOExceptionRate(clientExRate);
+          mdw.setCheckIndexOnClose(false);
+          clientMaxSize *= 2;
+          clientExRate /= 2;
+          return mdw;
+        }
+        
+        if (failures.get() > 0 && random().nextBoolean()) { // handler should fail
+          if (random().nextBoolean()) { // index dir fail
+            handlerIndexDir.setMaxSizeInBytes(handlerIndexMaxSize);
+            handlerIndexDir.setRandomIOExceptionRate(handlerIndexExRate);
+            handlerIndexDir.setRandomIOExceptionRateOnOpen(handlerIndexExRate);
+            handlerIndexMaxSize *= 2;
+            handlerIndexExRate /= 2;
+          } else { // taxo dir fail
+            handlerTaxoDir.setMaxSizeInBytes(handlerTaxoMaxSize);
+            handlerTaxoDir.setRandomIOExceptionRate(handlerTaxoExRate);
+            handlerTaxoDir.setRandomIOExceptionRateOnOpen(handlerTaxoExRate);
+            handlerTaxoDir.setCheckIndexOnClose(false);
+            handlerTaxoMaxSize *= 2;
+            handlerTaxoExRate /= 2;
+          }
+        } else {
+          // disable all errors
+          handlerIndexDir.setMaxSizeInBytes(0);
+          handlerIndexDir.setRandomIOExceptionRate(0.0);
+          handlerIndexDir.setRandomIOExceptionRateOnOpen(0.0);
+          handlerTaxoDir.setMaxSizeInBytes(0);
+          handlerTaxoDir.setRandomIOExceptionRate(0.0);
+          handlerTaxoDir.setRandomIOExceptionRateOnOpen(0.0);
+        }
+
+        return dir;
+      }
+    };
+    
+    handler = new IndexAndTaxonomyReplicationHandler(handlerIndexDir, handlerTaxoDir, new Callable<Boolean>() {
+      @Override
+      public Boolean call() throws Exception {
+        if (random().nextDouble() < 0.2 && failures.get() > 0) {
+          throw new RuntimeException("random exception from callback");
+        }
+        return null;
+      }
+    });
+
+    // wrap handleUpdateException so we can act on the thrown exception
+    client = new ReplicationClient(replicator, handler, sourceDirFactory) {
+      @SuppressWarnings("synthetic-access")
+      @Override
+      protected void handleUpdateException(Throwable t) {
+        if (t instanceof IOException) {
+          try {
+            if (VERBOSE) {
+              System.out.println("hit exception during update: " + t);
+              t.printStackTrace(System.out);
+            }
+
+            // test that the index can be read and check some basic statistics
+            DirectoryReader reader = DirectoryReader.open(handlerIndexDir.getDelegate());
+            try {
+              int numDocs = reader.numDocs();
+              int version = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16);
+              assertEquals(numDocs, version);
+            } finally {
+              reader.close();
+            }
+            // verify index is fully consistent
+            _TestUtil.checkIndex(handlerIndexDir.getDelegate());
+            
+            // verify taxonomy index is fully consistent (since we only add one
+            // category to all documents, there's not much more to validate)
+            _TestUtil.checkIndex(handlerTaxoDir.getDelegate());
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          } finally {
+            // count-down number of failures
+            failures.decrementAndGet();
+            assert failures.get() >= 0 : "handler failed too many times: " + failures.get();
+            if (VERBOSE) {
+              if (failures.get() == 0) {
+                System.out.println("no more failures expected");
+              } else {
+                System.out.println("num failures left: " + failures.get());
+              }
+            }
+          }
+        } else {
+          if (t instanceof RuntimeException) throw (RuntimeException) t;
+          throw new RuntimeException(t);
+        }
+      }
+    };
+    
+    client.startUpdateThread(10, "indexAndTaxo");
+    
+    final Directory baseHandlerIndexDir = handlerIndexDir.getDelegate();
+    int numRevisions = atLeast(20) + 2;
+    for (int i = 2; i < numRevisions; i++) {
+      replicator.publish(createRevision(i));
+      assertHandlerRevision(i, baseHandlerIndexDir);
+    }
+
+    // disable errors -- maybe randomness didn't exhaust all allowed failures,
+    // and we don't want e.g. CheckIndex to hit false errors. 
+    handlerIndexDir.setMaxSizeInBytes(0);
+    handlerIndexDir.setRandomIOExceptionRate(0.0);
+    handlerIndexDir.setRandomIOExceptionRateOnOpen(0.0);
+    handlerTaxoDir.setMaxSizeInBytes(0);
+    handlerTaxoDir.setRandomIOExceptionRate(0.0);
+    handlerTaxoDir.setRandomIOExceptionRateOnOpen(0.0);
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
new file mode 100755
index 0000000..059b8bd
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexAndTaxonomyRevisionTest.java
@@ -0,0 +1,170 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.facet.index.FacetFields;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.replicator.IndexAndTaxonomyRevision.SnapshotDirectoryTaxonomyWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.IOUtils;
+import org.junit.Test;
+
+public class IndexAndTaxonomyRevisionTest extends ReplicatorTestCase {
+  
+  private Document newDocument(TaxonomyWriter taxoWriter) throws IOException {
+    Document doc = new Document();
+    FacetFields ff = new FacetFields(taxoWriter);
+    ff.addFields(doc, Collections.singleton(new CategoryPath("A")));
+    return doc;
+  }
+  
+  @Test
+  public void testNoCommit() throws Exception {
+    Directory indexDir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
+    
+    Directory taxoDir = newDirectory();
+    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
+    try {
+      assertNotNull(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
+      fail("should have failed when there are no commits to snapshot");
+    } catch (IllegalStateException e) {
+      // expected
+    } finally {
+      IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
+    }
+  }
+  
+  @Test
+  public void testRevisionRelease() throws Exception {
+    Directory indexDir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
+    
+    Directory taxoDir = newDirectory();
+    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
+    try {
+      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.commit();
+      taxoWriter.commit();
+      Revision rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
+      // releasing that revision should not delete the files
+      rev1.release();
+      assertTrue(indexDir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+      assertTrue(taxoDir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+      
+      rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter); // create revision again, so the files are snapshotted
+      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.commit();
+      taxoWriter.commit();
+      assertNotNull(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
+      rev1.release(); // this release should trigger the delete of segments_1
+      assertFalse(indexDir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+    } finally {
+      IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
+    }
+  }
+  
+  @Test
+  public void testSegmentsFileLast() throws Exception {
+    Directory indexDir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
+    
+    Directory taxoDir = newDirectory();
+    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
+    try {
+      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.commit();
+      taxoWriter.commit();
+      Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
+      Map<String,List<RevisionFile>> sourceFiles = rev.getSourceFiles();
+      assertEquals(2, sourceFiles.size());
+      for (List<RevisionFile> files : sourceFiles.values()) {
+        String lastFile = files.get(files.size() - 1).fileName;
+        assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS) && !lastFile.equals(IndexFileNames.SEGMENTS_GEN));
+      }
+    } finally {
+      IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
+    }
+  }
+  
+  @Test
+  public void testOpen() throws Exception {
+    Directory indexDir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter indexWriter = new IndexWriter(indexDir, conf);
+    
+    Directory taxoDir = newDirectory();
+    SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
+    try {
+      indexWriter.addDocument(newDocument(taxoWriter));
+      indexWriter.commit();
+      taxoWriter.commit();
+      Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
+      for (Entry<String,List<RevisionFile>> e : rev.getSourceFiles().entrySet()) {
+        String source = e.getKey();
+        Directory dir = source.equals(IndexAndTaxonomyRevision.INDEX_SOURCE) ? indexDir : taxoDir;
+        for (RevisionFile file : e.getValue()) {
+          IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
+          InputStream in = rev.open(source, file.fileName);
+          assertEquals(src.length(), in.available());
+          byte[] srcBytes = new byte[(int) src.length()];
+          byte[] inBytes = new byte[(int) src.length()];
+          int offset = 0;
+          if (random().nextBoolean()) {
+            int skip = random().nextInt(10);
+            if (skip >= src.length()) {
+              skip = 0;
+            }
+            in.skip(skip);
+            src.seek(skip);
+            offset = skip;
+          }
+          src.readBytes(srcBytes, offset, srcBytes.length - offset);
+          in.read(inBytes, offset, inBytes.length - offset);
+          assertArrayEquals(srcBytes, inBytes);
+          IOUtils.close(src, in);
+        }
+      }
+    } finally {
+      IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
+    }
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java
new file mode 100755
index 0000000..0df3490
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexReplicationClientTest.java
@@ -0,0 +1,347 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.replicator.ReplicationClient.ReplicationHandler;
+import org.apache.lucene.replicator.ReplicationClient.SourceDirectoryFactory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.ThreadInterruptedException;
+import org.apache.lucene.util._TestUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class IndexReplicationClientTest extends ReplicatorTestCase {
+  
+  private static class IndexReadyCallback implements Callable<Boolean>, Closeable {
+    
+    private final Directory indexDir;
+    private DirectoryReader reader; 
+    private long lastGeneration = -1;
+    
+    public IndexReadyCallback(Directory indexDir) throws IOException {
+      this.indexDir = indexDir;
+      if (DirectoryReader.indexExists(indexDir)) {
+        reader = DirectoryReader.open(indexDir);
+        lastGeneration = reader.getIndexCommit().getGeneration();
+      }
+    }
+    
+    @Override
+    public Boolean call() throws Exception {
+      if (reader == null) {
+        reader = DirectoryReader.open(indexDir);
+        lastGeneration = reader.getIndexCommit().getGeneration();
+      } else {
+        DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+        assertNotNull("should not have reached here if no changes were made to the index", newReader);
+        long newGeneration = newReader.getIndexCommit().getGeneration();
+        assertTrue("expected newer generation; current=" + lastGeneration + " new=" + newGeneration, newGeneration > lastGeneration);
+        reader.close();
+        reader = newReader;
+        lastGeneration = newGeneration;
+        _TestUtil.checkIndex(indexDir);
+      }
+      return null;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      IOUtils.close(reader);
+    }
+  }
+  
+  private MockDirectoryWrapper publishDir, handlerDir;
+  private Replicator replicator;
+  private SourceDirectoryFactory sourceDirFactory;
+  private ReplicationClient client;
+  private ReplicationHandler handler;
+  private IndexWriter publishWriter;
+  private IndexReadyCallback callback;
+  
+  private static final String VERSION_ID = "version";
+  
+  private void assertHandlerRevision(int expectedID, Directory dir) throws IOException {
+    // Loop as long as the client is alive; the test framework will terminate
+    // us if there's a serious bug, e.g. the client doesn't really update.
+    // Otherwise, introducing timeouts is not good: it can easily lead to
+    // false positives.
+    while (client.isUpdateThreadAlive()) {
+      // give client a chance to update
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+        throw new ThreadInterruptedException(e);
+      }
+
+      try {
+        DirectoryReader reader = DirectoryReader.open(dir);
+        try {
+          int handlerID = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16);
+          if (expectedID == handlerID) {
+            return;
+          } else if (VERBOSE) {
+            System.out.println("expectedID=" + expectedID + " actual=" + handlerID + " generation=" + reader.getIndexCommit().getGeneration());
+          }
+        } finally {
+          reader.close();
+        }
+      } catch (Exception e) {
+        // we can hit IndexNotFoundException or e.g. EOFException (on
+        // segments_N) because it is being copied at the same time it is read by
+        // DirectoryReader.open().
+      }
+    }
+  }
+  
+  private Revision createRevision(final int id) throws IOException {
+    publishWriter.addDocument(new Document());
+    publishWriter.setCommitData(new HashMap<String, String>() {{
+      put(VERSION_ID, Integer.toString(id, 16));
+    }});
+    publishWriter.commit();
+    return new IndexRevision(publishWriter);
+  }
+  
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    publishDir = newMockDirectory();
+    handlerDir = newMockDirectory();
+    sourceDirFactory = new PerSessionDirectoryFactory(_TestUtil.getTempDir("replicationClientTest"));
+    replicator = new LocalReplicator();
+    callback = new IndexReadyCallback(handlerDir);
+    handler = new IndexReplicationHandler(handlerDir, callback);
+    client = new ReplicationClient(replicator, handler, sourceDirFactory);
+    
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    publishWriter = new IndexWriter(publishDir, conf);
+  }
+  
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(client, callback, publishWriter, replicator, publishDir, handlerDir);
+    super.tearDown();
+  }
+  
+  @Test
+  public void testNoUpdateThread() throws Exception {
+    assertNull("no version expected at start", handler.currentVersion());
+    
+    // Callback validates the replicated index
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    
+    replicator.publish(createRevision(2));
+    client.updateNow();
+    
+    // Publish two revisions without updating; the handler should be upgraded to the latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    client.updateNow();
+  }
+  
+  @Test
+  public void testUpdateThread() throws Exception {
+    client.startUpdateThread(10, "index");
+    
+    replicator.publish(createRevision(1));
+    assertHandlerRevision(1, handlerDir);
+    
+    replicator.publish(createRevision(2));
+    assertHandlerRevision(2, handlerDir);
+    
+    // Publish two revisions without updating; the handler should be upgraded to the latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    assertHandlerRevision(4, handlerDir);
+  }
+  
+  @Test
+  public void testRestart() throws Exception {
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    
+    replicator.publish(createRevision(2));
+    client.updateNow();
+    
+    client.stopUpdateThread();
+    client.close();
+    client = new ReplicationClient(replicator, handler, sourceDirFactory);
+    
+    // Publish two revisions without updating; the handler should be upgraded to the latest
+    replicator.publish(createRevision(3));
+    replicator.publish(createRevision(4));
+    client.updateNow();
+  }
+
+  /*
+   * This test verifies that the client and handler do not end up with a corrupt
+   * index if exceptions are thrown at any point during replication, whether while
+   * the client copies files from the server to the temporary space or while the
+   * handler copies them to the index directory.
+   */
+  @Test
+  public void testConsistencyOnExceptions() throws Exception {
+    // so the handler's index isn't empty
+    replicator.publish(createRevision(1));
+    client.updateNow();
+    client.close();
+    callback.close();
+    
+    // The replicator violates MDW's write-once policy: the handler may copy
+    // files to the index dir, fail on one file and revert the copy, and then
+    // copy the same files again on the next attempt. That is harmless in a
+    // real system, but it does violate write-once, which MDW rejects.
+    // Disabling the check means we won't catch a handler that overwrites an
+    // existing index file, but there is currently no way around that short of
+    // not using MDW.
+    handlerDir.setPreventDoubleWrite(false);
+
+    // wrap sourceDirFactory to return a MockDirectoryWrapper so we can simulate errors
+    final SourceDirectoryFactory in = sourceDirFactory;
+    final AtomicInteger failures = new AtomicInteger(atLeast(10));
+    sourceDirFactory = new SourceDirectoryFactory() {
+      
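+      // start with tight size limits and certain exceptions; every injection
+      // doubles the allowed size and halves the exception rate, so replication
+      // eventually succeeds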
+      private long clientMaxSize = 100, handlerMaxSize = 100;
+      private double clientExRate = 1.0, handlerExRate = 1.0;
+      
+      @Override
+      public void cleanupSession(String sessionID) throws IOException {
+        in.cleanupSession(sessionID);
+      }
+      
+      @SuppressWarnings("synthetic-access")
+      @Override
+      public Directory getDirectory(String sessionID, String source) throws IOException {
+        Directory dir = in.getDirectory(sessionID, source);
+        if (random().nextBoolean() && failures.get() > 0) { // client should fail, return wrapped dir
+          MockDirectoryWrapper mdw = new MockDirectoryWrapper(random(), dir);
+          mdw.setRandomIOExceptionRateOnOpen(clientExRate);
+          mdw.setMaxSizeInBytes(clientMaxSize);
+          mdw.setRandomIOExceptionRate(clientExRate);
+          mdw.setCheckIndexOnClose(false);
+          clientMaxSize *= 2;
+          clientExRate /= 2;
+          return mdw;
+        }
+
+        if (failures.get() > 0 && random().nextBoolean()) { // handler should fail
+          handlerDir.setMaxSizeInBytes(handlerMaxSize);
+          handlerDir.setRandomIOExceptionRateOnOpen(handlerExRate);
+          handlerDir.setRandomIOExceptionRate(handlerExRate);
+          handlerMaxSize *= 2;
+          handlerExRate /= 2;
+        } else {
+          // disable errors
+          handlerDir.setMaxSizeInBytes(0);
+          handlerDir.setRandomIOExceptionRate(0.0);
+          handlerDir.setRandomIOExceptionRateOnOpen(0.0);
+        }
+        return dir;
+      }
+    };
+    
+    handler = new IndexReplicationHandler(handlerDir, new Callable<Boolean>() {
+      @Override
+      public Boolean call() throws Exception {
+        if (random().nextDouble() < 0.2 && failures.get() > 0) {
+          throw new RuntimeException("random exception from callback");
+        }
+        return null;
+      }
+    });
+    
+    // override handleUpdateException so we can act on the thrown exception
+    client = new ReplicationClient(replicator, handler, sourceDirFactory) {
+      @SuppressWarnings("synthetic-access")
+      @Override
+      protected void handleUpdateException(Throwable t) {
+        if (t instanceof IOException) {
+          if (VERBOSE) {
+            System.out.println("hit exception during update: " + t);
+            t.printStackTrace(System.out);
+          }
+          try {
+            // verify that the index can be read, along with some basic statistics
+            DirectoryReader reader = DirectoryReader.open(handlerDir.getDelegate());
+            try {
+              int numDocs = reader.numDocs();
+              int version = Integer.parseInt(reader.getIndexCommit().getUserData().get(VERSION_ID), 16);
+              assertEquals(numDocs, version);
+            } finally {
+              reader.close();
+            }
+            // verify index consistency
+            _TestUtil.checkIndex(handlerDir.getDelegate());
+          } catch (IOException e) {
+            // exceptions here are bad, don't ignore them
+            throw new RuntimeException(e);
+          } finally {
+            // count down the number of failures
+            failures.decrementAndGet();
+            assert failures.get() >= 0 : "handler failed too many times: " + failures.get();
+            if (VERBOSE) {
+              if (failures.get() == 0) {
+                System.out.println("no more failures expected");
+              } else {
+                System.out.println("num failures left: " + failures.get());
+              }
+            }
+          }
+        } else {
+          if (t instanceof RuntimeException) throw (RuntimeException) t;
+          throw new RuntimeException(t);
+        }
+      }
+    };
+    
+    client.startUpdateThread(10, "index");
+
+    final Directory baseHandlerDir = handlerDir.getDelegate();
+    int numRevisions = atLeast(20);
+    for (int i = 2; i < numRevisions; i++) {
+      replicator.publish(createRevision(i));
+      assertHandlerRevision(i, baseHandlerDir);
+    }
+    
+    // disable errors -- maybe randomness didn't exhaust all allowed failures,
+    // and we don't want e.g. CheckIndex to hit false errors. 
+    handlerDir.setMaxSizeInBytes(0);
+    handlerDir.setRandomIOExceptionRate(0.0);
+    handlerDir.setRandomIOExceptionRateOnOpen(0.0);
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
new file mode 100755
index 0000000..b6e66b8
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/IndexRevisionTest.java
@@ -0,0 +1,155 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.InputStream;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.IOUtils;
+import org.junit.Test;
+
+public class IndexRevisionTest extends ReplicatorTestCase {
+  
+  @Test
+  public void testNoSnapshotDeletionPolicy() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+    IndexWriter writer = new IndexWriter(dir, conf);
+    try {
+      assertNotNull(new IndexRevision(writer));
+      fail("should have failed when IndexDeletionPolicy is not Snapshot");
+    } catch (IllegalArgumentException e) {
+      // expected
+    } finally {
+      IOUtils.close(writer, dir);
+    }
+  }
+  
+  @Test
+  public void testNoCommit() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    try {
+      assertNotNull(new IndexRevision(writer));
+      fail("should have failed when there are no commits to snapshot");
+    } catch (IllegalStateException e) {
+      // expected
+    } finally {
+      IOUtils.close(writer, dir);
+    }
+  }
+  
+  @Test
+  public void testRevisionRelease() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    try {
+      writer.addDocument(new Document());
+      writer.commit();
+      Revision rev1 = new IndexRevision(writer);
+      // releasing that revision should not delete the files
+      rev1.release();
+      assertTrue(dir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+      
+      rev1 = new IndexRevision(writer); // create revision again, so the files are snapshotted
+      writer.addDocument(new Document());
+      writer.commit();
+      assertNotNull(new IndexRevision(writer));
+      rev1.release(); // this release should trigger the delete of segments_1
+      assertFalse(dir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+    } finally {
+      IOUtils.close(writer, dir);
+    }
+  }
+  
+  @Test
+  public void testSegmentsFileLast() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    try {
+      writer.addDocument(new Document());
+      writer.commit();
+      Revision rev = new IndexRevision(writer);
+      @SuppressWarnings("unchecked")
+      Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
+      assertEquals(1, sourceFiles.size());
+      List<RevisionFile> files = sourceFiles.values().iterator().next();
+      String lastFile = files.get(files.size() - 1).fileName;
+      assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS) && !lastFile.equals(IndexFileNames.SEGMENTS_GEN));
+    } finally {
+      IOUtils.close(writer, dir);
+    }
+  }
+  
+  @Test
+  public void testOpen() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    try {
+      writer.addDocument(new Document());
+      writer.commit();
+      Revision rev = new IndexRevision(writer);
+      @SuppressWarnings("unchecked")
+      Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
+      String source = sourceFiles.keySet().iterator().next();
+      for (RevisionFile file : sourceFiles.values().iterator().next()) {
+        IndexInput src = dir.openInput(file.fileName, IOContext.READONCE);
+        InputStream in = rev.open(source, file.fileName);
+        assertEquals(src.length(), in.available());
+        byte[] srcBytes = new byte[(int) src.length()];
+        byte[] inBytes = new byte[(int) src.length()];
+        int offset = 0;
+        if (random().nextBoolean()) {
+          int skip = random().nextInt(10);
+          if (skip >= src.length()) {
+            skip = 0;
+          }
+          in.skip(skip);
+          src.seek(skip);
+          offset = skip;
+        }
+        src.readBytes(srcBytes, offset, srcBytes.length - offset);
+        in.read(inBytes, offset, inBytes.length - offset);
+        assertArrayEquals(srcBytes, inBytes);
+        IOUtils.close(src, in);
+      }
+    } finally {
+      IOUtils.close(writer, dir);
+    }
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
new file mode 100755
index 0000000..1fb9152
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/LocalReplicatorTest.java
@@ -0,0 +1,196 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LocalReplicatorTest extends ReplicatorTestCase {
+  
+  private static final String VERSION_ID = "version";
+  
+  private LocalReplicator replicator;
+  private Directory sourceDir;
+  private IndexWriter sourceWriter;
+  
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    sourceDir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    sourceWriter = new IndexWriter(sourceDir, conf);
+    replicator = new LocalReplicator();
+  }
+  
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    IOUtils.close(replicator, sourceWriter, sourceDir);
+    super.tearDown();
+  }
+  
+  private Revision createRevision(final int id) throws IOException {
+    sourceWriter.addDocument(new Document());
+    sourceWriter.setCommitData(new HashMap<String, String>() {{
+      put(VERSION_ID, Integer.toString(id, 16));
+    }});
+    sourceWriter.commit();
+    return new IndexRevision(sourceWriter);
+  }
+  
+  @Test
+  public void testCheckForUpdateNoRevisions() throws Exception {
+    assertNull(replicator.checkForUpdate(null));
+  }
+  
+  @Test
+  public void testObtainFileAlreadyClosed() throws IOException {
+    replicator.publish(createRevision(1));
+    SessionToken res = replicator.checkForUpdate(null);
+    assertNotNull(res);
+    assertEquals(1, res.sourceFiles.size());
+    Entry<String,List<RevisionFile>> entry = res.sourceFiles.entrySet().iterator().next();
+    replicator.close();
+    try {
+      replicator.obtainFile(res.id, entry.getKey(), entry.getValue().get(0).fileName);
+      fail("should have failed on AlreadyClosedException");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+  }
+  
+  @Test
+  public void testPublishAlreadyClosed() throws IOException {
+    replicator.close();
+    try {
+      replicator.publish(createRevision(2));
+      fail("should have failed on AlreadyClosedException");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+  }
+  
+  @Test
+  public void testUpdateAlreadyClosed() throws IOException {
+    replicator.close();
+    try {
+      replicator.checkForUpdate(null);
+      fail("should have failed on AlreadyClosedException");
+    } catch (AlreadyClosedException e) {
+      // expected
+    }
+  }
+  
+  @Test
+  public void testPublishSameRevision() throws IOException {
+    Revision rev = createRevision(1);
+    replicator.publish(rev);
+    SessionToken res = replicator.checkForUpdate(null);
+    assertNotNull(res);
+    assertEquals(rev.getVersion(), res.version);
+    replicator.release(res.id);
+    replicator.publish(new IndexRevision(sourceWriter));
+    res = replicator.checkForUpdate(res.version);
+    assertNull(res);
+    
+    // now make sure that publishing the same revision doesn't leave revisions
+    // "locked", i.e. that the replicator releases revisions even when they are
+    // not kept
+    replicator.publish(createRevision(2));
+    assertEquals(1, DirectoryReader.listCommits(sourceDir).size());
+  }
+  
+  @Test
+  public void testPublishOlderRev() throws IOException {
+    replicator.publish(createRevision(1));
+    Revision old = new IndexRevision(sourceWriter);
+    replicator.publish(createRevision(2));
+    try {
+      replicator.publish(old);
+      fail("should have failed to publish an older revision");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    assertEquals(1, DirectoryReader.listCommits(sourceDir).size());
+  }
+  
+  @Test
+  public void testObtainMissingFile() throws IOException {
+    replicator.publish(createRevision(1));
+    SessionToken res = replicator.checkForUpdate(null);
+    try {
+      replicator.obtainFile(res.id, res.sourceFiles.keySet().iterator().next(), "madeUpFile");
+      fail("should have failed obtaining an unrecognized file");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+  
+  @Test
+  public void testSessionExpiration() throws IOException, InterruptedException {
+    replicator.publish(createRevision(1));
+    SessionToken session = replicator.checkForUpdate(null);
+    replicator.setExpirationThreshold(5); // expire quickly
+    Thread.sleep(50); // sufficient for expiration
+    try {
+      replicator.obtainFile(session.id, session.sourceFiles.keySet().iterator().next(), session.sourceFiles.values().iterator().next().get(0).fileName);
+      fail("should have failed to obtain a file for an expired session");
+    } catch (SessionExpiredException e) {
+      // expected
+    }
+  }
+  
+  @Test
+  public void testUpdateToLatest() throws IOException {
+    replicator.publish(createRevision(1));
+    Revision rev = createRevision(2);
+    replicator.publish(rev);
+    SessionToken res = replicator.checkForUpdate(null);
+    assertNotNull(res);
+    assertEquals(0, rev.compareTo(res.version));
+  }
+  
+  @Test
+  public void testRevisionRelease() throws Exception {
+    replicator.publish(createRevision(1));
+    assertTrue(sourceDir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+    replicator.publish(createRevision(2));
+    // now the files of revision 1 can be deleted
+    assertTrue(sourceDir.fileExists(IndexFileNames.SEGMENTS + "_2"));
+    assertFalse("segments_1 should not be found in index directory after revision is released", sourceDir.fileExists(IndexFileNames.SEGMENTS + "_1"));
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
new file mode 100755
index 0000000..704578a
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/ReplicatorTestCase.java
@@ -0,0 +1,118 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.net.SocketException;
+
+import org.apache.http.conn.ClientConnectionManager;
+import org.apache.http.impl.conn.PoolingClientConnectionManager;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.Handler;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.junit.AfterClass;
+
+@SuppressCodecs("Lucene3x")
+public class ReplicatorTestCase extends LuceneTestCase {
+  
+  private static final int BASE_PORT = 7000;
+  
+  // if a test calls newHttpServer() multiple times, or some ports have already
+  // failed, don't start from BASE_PORT again
+  private static int lastPortUsed = -1;
+  
+  private static ClientConnectionManager clientConnectionManager;
+  
+  @AfterClass
+  public static void afterClassReplicatorTestCase() throws Exception {
+    if (clientConnectionManager != null) {
+      clientConnectionManager.shutdown();
+      clientConnectionManager = null;
+    }
+  }
+  
+  /**
+   * Returns a new {@link Server HTTP Server} instance. To obtain its port, use
+   * {@link #serverPort(Server)}.
+   */
+  public static synchronized Server newHttpServer(Handler handler) throws Exception {
+    int port = lastPortUsed == -1 ? BASE_PORT : lastPortUsed + 1;
+    Server server = null;
+    while (true) {
+      try {
+        server = new Server(port);
+        
+        server.setHandler(handler);
+        
+        QueuedThreadPool threadPool = new QueuedThreadPool();
+        threadPool.setDaemon(true);
+        threadPool.setMaxIdleTimeMs(0);
+        server.setThreadPool(threadPool);
+        
+        // this will test the port
+        server.start();
+        
+        // if here, port is available
+        lastPortUsed = port;
+        return server;
+      } catch (SocketException e) {
+        stopHttpServer(server);
+        // this is ok, we'll try the next port until successful.
+        ++port;
+      }
+    }
+  }
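+
+  // Typical lifecycle in a test (sketch; "myHandler" stands for any Jetty Handler):
+  //   Server server = newHttpServer(myHandler);
+  //   int port = serverPort(server);
+  //   try {
+  //     ... exercise http://localhost:<port> ...
+  //   } finally {
+  //     stopHttpServer(server);
+  //   }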
+  
+  /**
+   * Returns a {@link Server}'s port. This method assumes that no
+   * {@link Connector}s were added to the Server besides the default one.
+   */
+  public static int serverPort(Server httpServer) {
+    return httpServer.getConnectors()[0].getPort();
+  }
+  
+  /**
+   * Stops the given HTTP Server instance. This method does its best to guarantee
+   * that no threads are left running after it returns.
+   */
+  public static void stopHttpServer(Server httpServer) throws Exception {
+    httpServer.stop();
+    httpServer.join();
+  }
+  
+  /**
+   * Returns a {@link ClientConnectionManager}.
+   * <p>
+   * <b>NOTE:</b> do not {@link ClientConnectionManager#shutdown()} this
+   * connection manager; it will be shut down automatically after all tests have
+   * finished.
+   */
+  public static synchronized ClientConnectionManager getClientConnectionManager() {
+    if (clientConnectionManager == null) {
+      PoolingClientConnectionManager ccm = new PoolingClientConnectionManager();
+      ccm.setDefaultMaxPerRoute(128);
+      ccm.setMaxTotal(128);
+      clientConnectionManager = ccm;
+    }
+    
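+    // tests hand this shared manager to an HttpReplicator, e.g. (as in
+    // HttpReplicatorTest): new HttpReplicator(host, port, context, getClientConnectionManager())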
+    return clientConnectionManager;
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java
new file mode 100755
index 0000000..34aa08b
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/SessionTokenTest.java
@@ -0,0 +1,64 @@
+package org.apache.lucene.replicator;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.junit.Test;
+
+public class SessionTokenTest extends ReplicatorTestCase {
+  
+  @Test
+  public void testSerialization() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    writer.addDocument(new Document());
+    writer.commit();
+    Revision rev = new IndexRevision(writer);
+    
+    SessionToken session1 = new SessionToken("17", rev);
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    session1.serialize(new DataOutputStream(baos));
+    byte[] b = baos.toByteArray();
+    SessionToken session2 = new SessionToken(new DataInputStream(new ByteArrayInputStream(b)));
+    assertEquals(session1.id, session2.id);
+    assertEquals(session1.version, session2.version);
+    assertEquals(1, session2.sourceFiles.size());
+    assertEquals(session1.sourceFiles.size(), session2.sourceFiles.size());
+    assertEquals(session1.sourceFiles.keySet(), session2.sourceFiles.keySet());
+    List<RevisionFile> files1 = session1.sourceFiles.values().iterator().next();
+    List<RevisionFile> files2 = session2.sourceFiles.values().iterator().next();
+    assertEquals(files1, files2);
+    
+    IOUtils.close(writer, dir);
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
new file mode 100755
index 0000000..28499ff
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/HttpReplicatorTest.java
@@ -0,0 +1,120 @@
+package org.apache.lucene.replicator.http;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.replicator.IndexReplicationHandler;
+import org.apache.lucene.replicator.IndexRevision;
+import org.apache.lucene.replicator.LocalReplicator;
+import org.apache.lucene.replicator.PerSessionDirectoryFactory;
+import org.apache.lucene.replicator.ReplicationClient;
+import org.apache.lucene.replicator.Replicator;
+import org.apache.lucene.replicator.ReplicatorTestCase;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util._TestUtil;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.servlet.ServletHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.junit.Before;
+import org.junit.Test;
+
+public class HttpReplicatorTest extends ReplicatorTestCase {
+  
+  private File clientWorkDir;
+  private Replicator serverReplicator;
+  private IndexWriter writer;
+  private DirectoryReader reader;
+  private Server server;
+  private int port;
+  private Directory serverIndexDir, handlerIndexDir;
+  
+  private void startServer() throws Exception {
+    ServletHandler replicationHandler = new ServletHandler();
+    ReplicationService service = new ReplicationService(Collections.singletonMap("s1", serverReplicator));
+    ServletHolder servlet = new ServletHolder(new ReplicationServlet(service));
+    replicationHandler.addServletWithMapping(servlet, ReplicationService.REPLICATION_CONTEXT + "/*");
+    server = newHttpServer(replicationHandler);
+    port = serverPort(server);
+  }
+  
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    clientWorkDir = _TestUtil.getTempDir("httpReplicatorTest");
+    handlerIndexDir = newDirectory();
+    serverIndexDir = newDirectory();
+    serverReplicator = new LocalReplicator();
+    startServer();
+    
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
+    writer = new IndexWriter(serverIndexDir, conf);
+    reader = DirectoryReader.open(writer, false);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    stopHttpServer(server);
+    IOUtils.close(reader, writer, handlerIndexDir, serverIndexDir);
+    super.tearDown();
+  }
+  
+  private void publishRevision(int id) throws IOException {
+    Document doc = new Document();
+    writer.addDocument(doc);
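+    // hex-encode the id in the commit userData; testBasic() parses it back with radix 16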
+    writer.setCommitData(Collections.singletonMap("ID", Integer.toString(id, 16)));
+    writer.commit();
+    serverReplicator.publish(new IndexRevision(writer));
+  }
+  
+  private void reopenReader() throws IOException {
+    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+    assertNotNull(newReader);
+    reader.close();
+    reader = newReader;
+  }
+  
+  @Test
+  public void testBasic() throws Exception {
+    Replicator replicator = new HttpReplicator("localhost", port, ReplicationService.REPLICATION_CONTEXT + "/s1", 
+        getClientConnectionManager());
+    ReplicationClient client = new ReplicationClient(replicator, new IndexReplicationHandler(handlerIndexDir, null), 
+        new PerSessionDirectoryFactory(clientWorkDir));
+    
+    publishRevision(1);
+    client.updateNow();
+    reopenReader();
+    assertEquals(1, Integer.parseInt(reader.getIndexCommit().getUserData().get("ID"), 16));
+    
+    publishRevision(2);
+    client.updateNow();
+    reopenReader();
+    assertEquals(2, Integer.parseInt(reader.getIndexCommit().getUserData().get("ID"), 16));
+  }
+  
+}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java b/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java
new file mode 100755
index 0000000..797d35d
--- /dev/null
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/http/ReplicationServlet.java
@@ -0,0 +1,41 @@
+package org.apache.lucene.replicator.http;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+public class ReplicationServlet extends HttpServlet {
+  
+  private final ReplicationService service;
+  
+  public ReplicationServlet(ReplicationService service) {
+    super();
+    this.service = service;
+  }
+  
+  @Override
+  protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+    service.perform(req, resp);
+  }
+  
+}
diff --git a/lucene/test-framework/ivy.xml b/lucene/test-framework/ivy.xml
index e75db23..ab3f66e 100644
--- a/lucene/test-framework/ivy.xml
+++ b/lucene/test-framework/ivy.xml
@@ -32,8 +32,8 @@
       <dependency org="org.apache.ant" name="ant" rev="1.8.2" transitive="false" />
 
       <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
-      <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.9" transitive="false" conf="default->*;junit4-stdalone->*" />
-      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.9" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
 
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
index 3ae5928..aeb8039 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -60,7 +60,6 @@
  *        refusing to write/delete to open files.
  * </ul>
  */
-
 public class MockDirectoryWrapper extends BaseDirectoryWrapper {
   long maxSize;
 
@@ -159,6 +158,26 @@
     this.throttling = throttling;
   }
 
+  /**
+   * Returns true if {@link #getDelegate() delegate} must sync its files.
+   * Currently, only {@link NRTCachingDirectory} requires sync'ing its files
+   * because otherwise they are cached in an internal {@link RAMDirectory}. If
+   * other directories require that too, they should be added to this method.
+   */
+  private boolean mustSync() {
+    Directory delegate = this.delegate;
+    while (true) {
+      if (delegate instanceof RateLimitedDirectoryWrapper) {
+        delegate = ((RateLimitedDirectoryWrapper) delegate).getDelegate();
+      } else if (delegate instanceof TrackingDirectoryWrapper) {
+        delegate = ((TrackingDirectoryWrapper) delegate).getDelegate();
+      } else {
+        break;
+      }
+    }
+    return delegate instanceof NRTCachingDirectory;
+  }
+  
   @Override
   public synchronized void sync(Collection<String> names) throws IOException {
     maybeYield();
@@ -166,12 +185,16 @@
     if (crashed) {
       throw new IOException("cannot sync after crash");
     }
-    unSyncedFiles.removeAll(names);
-    // TODO: need to improve hack to be OK w/
-    // RateLimitingDirWrapper in between...
-    if (true || LuceneTestCase.rarely(randomState) || delegate instanceof NRTCachingDirectory) {
-      // don't wear out our hardware so much in tests.
-      delegate.sync(names);
+    // don't wear out our hardware so much in tests.
+    if (LuceneTestCase.rarely(randomState) || mustSync()) {
+      for (String name : names) {
+        // randomly fail with IOE on any file
+        maybeThrowIOException(name);
+        delegate.sync(Collections.singleton(name));
+        unSyncedFiles.remove(name);
+      }
+    } else {
+      unSyncedFiles.removeAll(names);
     }
   }
   
@@ -343,30 +366,26 @@
     return randomIOExceptionRateOnOpen;
   }
 
-  void maybeThrowIOException() throws IOException {
-    maybeThrowIOException(null);
-  }
-
   void maybeThrowIOException(String message) throws IOException {
     if (randomState.nextDouble() < randomIOExceptionRate) {
       if (LuceneTestCase.VERBOSE) {
         System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")"));
         new Throwable().printStackTrace(System.out);
       }
-      throw new IOException("a random IOException" + (message == null ? "" : "(" + message + ")"));
+      throw new IOException("a random IOException" + (message == null ? "" : " (" + message + ")"));
     }
   }
 
-  void maybeThrowIOExceptionOnOpen() throws IOException {
+  void maybeThrowIOExceptionOnOpen(String name) throws IOException {
     if (randomState.nextDouble() < randomIOExceptionRateOnOpen) {
       if (LuceneTestCase.VERBOSE) {
-        System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception during open");
+        System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception during open file=" + name);
         new Throwable().printStackTrace(System.out);
       }
       if (randomState.nextBoolean()) {
-        throw new IOException("a random IOException");
+        throw new IOException("a random IOException (" + name + ")");
       } else {
-        throw new FileNotFoundException("a random IOException");
+        throw new FileNotFoundException("a random IOException (" + name + ")");
       }
     }
   }
@@ -432,7 +451,7 @@
   @Override
   public synchronized IndexOutput createOutput(String name, IOContext context) throws IOException {
     maybeThrowDeterministicException();
-    maybeThrowIOExceptionOnOpen();
+    maybeThrowIOExceptionOnOpen(name);
     maybeYield();
     if (failOnCreateOutput) {
       maybeThrowDeterministicException();
@@ -486,7 +505,7 @@
     if (throttling == Throttling.ALWAYS || 
         (throttling == Throttling.SOMETIMES && randomState.nextInt(50) == 0) && !(delegate instanceof RateLimitedDirectoryWrapper)) {
       if (LuceneTestCase.VERBOSE) {
-        System.out.println("MockDirectoryWrapper: throttling indexOutput");
+        System.out.println("MockDirectoryWrapper: throttling indexOutput (" + name + ")");
       }
       return throttledOutput.newFromDelegate(io);
     } else {
@@ -519,7 +538,7 @@
   @Override
   public synchronized IndexInput openInput(String name, IOContext context) throws IOException {
     maybeThrowDeterministicException();
-    maybeThrowIOExceptionOnOpen();
+    maybeThrowIOExceptionOnOpen(name);
     maybeYield();
     if (failOnOpenInput) {
       maybeThrowDeterministicException();
@@ -632,7 +651,7 @@
         if (LuceneTestCase.VERBOSE) {
           System.out.println("\nNOTE: MockDirectoryWrapper: now crash");
         }
-        crash(); // corrumpt any unsynced-files
+        crash(); // corrupt any unsynced-files
         if (LuceneTestCase.VERBOSE) {
           System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex");
         } 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index cc6dd42..2ce2823 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -23,6 +23,7 @@
 import java.lang.reflect.Method;
 import java.util.*;
 import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.logging.Logger;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -148,10 +149,10 @@
   public static final String SYSPROP_BADAPPLES = "tests.badapples";
 
   /** @see #ignoreAfterMaxFailures*/
-  private static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
+  public static final String SYSPROP_MAXFAILURES = "tests.maxfailures";
 
   /** @see #ignoreAfterMaxFailures*/
-  private static final String SYSPROP_FAILFAST = "tests.failfast";
+  public static final String SYSPROP_FAILFAST = "tests.failfast";
 
   /**
    * Annotation for tests that should only be run during nightly builds.
@@ -356,9 +357,17 @@
       new TestRuleMarkFailure();
 
   /**
-   * Ignore tests after hitting a designated number of initial failures.
+   * Ignore tests after hitting a designated number of initial failures. This
+   * is truly a "static" global singleton since it needs to span the lifetime of all
+   * test classes running inside this JVM (it cannot be part of a class rule).
+   * 
+   * <p>This poses some problems for the test framework's own tests, because these
+   * sometimes trigger intentional failures which count toward the global limit. This
+   * field holds a (possibly changing) reference to {@link TestRuleIgnoreAfterMaxFailures}
+   * and we dispatch to its current value from the {@link #classRules} chain using {@link TestRuleDelegate}.
    */
-  final static TestRuleIgnoreAfterMaxFailures ignoreAfterMaxFailures; 
+  private static final AtomicReference<TestRuleIgnoreAfterMaxFailures> ignoreAfterMaxFailuresDelegate;
+  private static final TestRule ignoreAfterMaxFailures;
   static {
     int maxFailures = systemPropertyAsInt(SYSPROP_MAXFAILURES, Integer.MAX_VALUE);
     boolean failFast = systemPropertyAsBoolean(SYSPROP_FAILFAST, false);
@@ -373,7 +382,19 @@
       }
     }
 
-    ignoreAfterMaxFailures = new TestRuleIgnoreAfterMaxFailures(maxFailures);
+    ignoreAfterMaxFailuresDelegate = 
+        new AtomicReference<TestRuleIgnoreAfterMaxFailures>(
+            new TestRuleIgnoreAfterMaxFailures(maxFailures));
+    ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate);
+  }
+
+  /**
+   * Temporarily substitute the global {@link TestRuleIgnoreAfterMaxFailures}. See
+   * {@link #ignoreAfterMaxFailuresDelegate} for some explanation why this method 
+   * is needed.
+   */
+  public static TestRuleIgnoreAfterMaxFailures replaceMaxFailureRule(TestRuleIgnoreAfterMaxFailures newValue) {
+    return ignoreAfterMaxFailuresDelegate.getAndSet(newValue);
   }
 
   /**
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java
new file mode 100644
index 0000000..08d969a
--- /dev/null
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java
@@ -0,0 +1,45 @@
+package org.apache.lucene.util;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A {@link TestRule} that delegates to another {@link TestRule} via a delegate
+ * contained in an {@link AtomicReference}.
+ */
+final class TestRuleDelegate<T extends TestRule> implements TestRule {
+  private AtomicReference<T> delegate;
+
+  private TestRuleDelegate(AtomicReference<T> delegate) {
+    this.delegate = delegate;
+  }
+
+  @Override
+  public Statement apply(Statement s, Description d) {
+    return delegate.get().apply(s, d);
+  }
+
+  static <T extends TestRule> TestRuleDelegate<T> of(AtomicReference<T> delegate) {
+    return new TestRuleDelegate<T>(delegate);
+  }
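+
+  // Example (see LuceneTestCase.ignoreAfterMaxFailures): wrap a mutable reference
+  // so the effective rule can be swapped while the JVM-wide suite runs:
+  //   AtomicReference<TestRuleIgnoreAfterMaxFailures> ref =
+  //       new AtomicReference<TestRuleIgnoreAfterMaxFailures>(initialRule);
+  //   TestRule rule = TestRuleDelegate.of(ref);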
+}
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 17967f7..c626bc7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -38,6 +38,12 @@
 Detailed Change List
 ----------------------
 
+Other Changes
+----------------------
+
+* SOLR-4622: Hardcoded SolrCloud defaults for hostContext and hostPort that
+  were deprecated in 4.3 have been removed completely. (hossman)
+
 ==================  4.4.0 ==================
 
 Versions of Major Components
@@ -54,7 +60,7 @@
 * SOLR-4778: The signature of LogWatcher.registerListener has changed, from
   (ListenerConfig, CoreContainer) to (ListenerConfig).  Users implementing their
   own LogWatcher classes will need to change their code accordingly.
-
+  
 Detailed Change List
 ----------------------
 
@@ -72,6 +78,10 @@
   in cases where exact hit-counts are unnecessary.  Also, when "collateExtendedResults"
   is false, this optimization is always made (James Dyer).
 
+* SOLR-4785: New MaxScoreQParserPlugin returning max() instead of sum() of terms (janhoy)
+
+* SOLR-4234: Add support for binary files in ZooKeeper. (Eric Pugh via Mark Miller)
+
 Bug Fixes
 ----------------------
 
@@ -89,6 +99,16 @@
 
 * SOLR-4616: HitRatio on caches is now exposed over JMX MBeans as a float.
   (Greg Bowyer)
+  
+* SOLR-4803: Fixed core discovery mode (i.e. new style solr.xml) to treat
+  'collection1' as the default core name. (hossman)
+  
+* SOLR-4790: Throw an error if a core has the same name as another core, in both
+  old and new style solr.xml
+
+* SOLR-4563: RSS DIH-example not working (janhoy)
+
+* SOLR-4796: zkcli.sh should honor JAVA_HOME (Roman Shaposhnik via Mark Miller)
 
 Other Changes
 ----------------------
@@ -116,6 +136,49 @@
 
 * SOLR-4784: Make class LuceneQParser public (janhoy)
 
+
+==================  4.3.1 ==================
+
+Versions of Major Components
+---------------------
+Apache Tika 1.3
+Carrot2 3.6.2
+Velocity 1.7 and Velocity Tools 2.0
+Apache UIMA 2.3.1
+Apache ZooKeeper 3.4.5
+
+Detailed Change List
+----------------------
+
+Bug Fixes
+----------------------
+
+* SOLR-4795: Sub shard leader should not accept any updates from parent after
+  it goes active (shalin)
+
+* SOLR-4798: Shard splitting does not respect the router for the collection
+  when executing the index split. One effect of this is that documents
+  may be placed in the wrong shard when the default compositeId router
+  is used in conjunction with IDs containing "!". (yonik)
+
+* SOLR-4797: Shard splitting creates sub shards which have the wrong hash
+  range in cluster state. This happens when numShards is not a power of two
+  and router is compositeId. (shalin)
+  
+* SOLR-4791: solr.xml sharedLib does not work in 4.3.0 (Ryan Ernst, Jan Høydahl via 
+  Erick Erickson)
+
+* SOLR-4806: Shard splitting does not abort if WaitForState times out (shalin)
+
+* SOLR-4807: The zkcli script now works with log4j. The zkcli.bat script
+  was broken on Windows in 4.3.0, now it works. (Shawn Heisey)
+
+Other Changes
+----------------------
+
+* SOLR-4760: Include core name in logs when loading schema.
+  (Shawn Heisey)
+
 ==================  4.3.0 ==================
 
 Versions of Major Components
diff --git a/solr/NOTICE.txt b/solr/NOTICE.txt
index a8d5e66..819879e 100644
--- a/solr/NOTICE.txt
+++ b/solr/NOTICE.txt
@@ -69,11 +69,6 @@
 see http://sites.google.com/site/rrettesite/moman and 
 http://bitbucket.org/jpbarrette/moman/overview/
 
-The class org.apache.lucene.util.SorterTemplate was inspired by CGLIB's class
-with the same name. The implementation part is mainly done using pre-existing
-Lucene sorting code. In-place stable mergesort was borrowed from CGLIB,
-which is Apache-licensed.
-
 The class org.apache.lucene.util.WeakIdentityMap was derived from
 the Apache CXF project and is Apache License 2.0.
 
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index 0a24457..5b83f63 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -320,21 +320,22 @@
   
   private boolean splitShard(ClusterState clusterState, ZkNodeProps message, NamedList results) {
     log.info("Split shard invoked");
-    String collection = message.getStr("collection");
+    String collectionName = message.getStr("collection");
     String slice = message.getStr(ZkStateReader.SHARD_ID_PROP);
-    Slice parentSlice = clusterState.getSlice(collection, slice);
+    Slice parentSlice = clusterState.getSlice(collectionName, slice);
     
     if (parentSlice == null) {
-      if(clusterState.getCollections().contains(collection)) {
+      if(clusterState.getCollections().contains(collectionName)) {
         throw new SolrException(ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
       } else {
-        throw new SolrException(ErrorCode.BAD_REQUEST, "No collection with the specified name exists: " + collection);
+        throw new SolrException(ErrorCode.BAD_REQUEST, "No collection with the specified name exists: " + collectionName);
       }      
     }
     
     // find the leader for the shard
-    Replica parentShardLeader = clusterState.getLeader(collection, slice);
-    
+    Replica parentShardLeader = clusterState.getLeader(collectionName, slice);
+    DocCollection collection = clusterState.getCollection(collectionName);
+    DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
     DocRouter.Range range = parentSlice.getRange();
     if (range == null) {
       range = new PlainIdRouter().fullRange();
@@ -342,8 +343,7 @@
 
     // todo: fixed to two partitions?
     // todo: accept the range as a param to api?
-    // todo: handle randomizing subshard name in case a shard with the same name already exists.
-    List<DocRouter.Range> subRanges = new PlainIdRouter().partitionRange(2, range);
+    List<DocRouter.Range> subRanges = router.partitionRange(2, range);
     try {
       List<String> subSlices = new ArrayList<String>(subRanges.size());
       List<String> subShardNames = new ArrayList<String>(subRanges.size());
@@ -351,10 +351,10 @@
       for (int i = 0; i < subRanges.size(); i++) {
         String subSlice = slice + "_" + i;
         subSlices.add(subSlice);
-        String subShardName = collection + "_" + subSlice + "_replica1";
+        String subShardName = collectionName + "_" + subSlice + "_replica1";
         subShardNames.add(subShardName);
 
-        Slice oSlice = clusterState.getSlice(collection, subSlice);
+        Slice oSlice = clusterState.getSlice(collectionName, subSlice);
         if (oSlice != null) {
           if (Slice.ACTIVE.equals(oSlice.getState())) {
             throw new SolrException(ErrorCode.BAD_REQUEST, "Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
@@ -372,13 +372,10 @@
         }
       }
 
-      ShardResponse srsp;
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          processResponse(results, srsp);
-        }
-      } while (srsp != null);
+      // do not abort splitshard if the unloading fails
+      // this can happen because the replicas created previously may be down
+      // the only side effect of this is that the sub shard may end up having more replicas than we want
+      collectShardResponses(results, false, null);
 
       for (int i=0; i<subRanges.size(); i++)  {
         String subSlice = subSlices.get(i);
@@ -386,14 +383,14 @@
         DocRouter.Range subRange = subRanges.get(i);
 
         log.info("Creating shard " + subShardName + " as part of slice "
-            + subSlice + " of collection " + collection + " on "
+            + subSlice + " of collection " + collectionName + " on "
             + nodeName);
 
         ModifiableSolrParams params = new ModifiableSolrParams();
         params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
 
         params.set(CoreAdminParams.NAME, subShardName);
-        params.set(CoreAdminParams.COLLECTION, collection);
+        params.set(CoreAdminParams.COLLECTION, collectionName);
         params.set(CoreAdminParams.SHARD, subSlice);
         params.set(CoreAdminParams.SHARD_RANGE, subRange.toString());
         params.set(CoreAdminParams.SHARD_STATE, Slice.CONSTRUCTION);
@@ -413,18 +410,14 @@
         sendShardRequest(nodeName, new ModifiableSolrParams(cmd.getParams()));
       }
 
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          processResponse(results, srsp);
-        }
-      } while (srsp != null);
+      collectShardResponses(results, true,
+          "SPLTSHARD failed to create subshard leaders or timed out waiting for them to come up");
       
       log.info("Successfully created all sub-shards for collection "
-          + collection + " parent shard: " + slice + " on: " + parentShardLeader);
+          + collectionName + " parent shard: " + slice + " on: " + parentShardLeader);
 
       log.info("Splitting shard " + parentShardLeader.getName() + " as part of slice "
-          + slice + " of collection " + collection + " on "
+          + slice + " of collection " + collectionName + " on "
           + parentShardLeader);
 
       ModifiableSolrParams params = new ModifiableSolrParams();
@@ -436,12 +429,7 @@
       }
 
       sendShardRequest(parentShardLeader.getNodeName(), params);
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          processResponse(results, srsp);
-        }
-      } while (srsp != null);
+      collectShardResponses(results, true, "SPLITSHARD failed to invoke SPLIT core admin command");
 
       log.info("Index on shard: " + nodeName + " split into two successfully");
 
@@ -458,12 +446,8 @@
         sendShardRequest(nodeName, params);
       }
 
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          processResponse(results, srsp);
-        }
-      } while (srsp != null);
+      collectShardResponses(results, true,
+          "SPLITSHARD failed while asking sub shard leaders to apply buffered updates");
 
       log.info("Successfully applied buffered updates on : " + subShardNames);
 
@@ -474,7 +458,7 @@
 
       // TODO: Have replication factor decided in some other way instead of numShards for the parent
 
-      int repFactor = clusterState.getSlice(collection, slice).getReplicas().size();
+      int repFactor = clusterState.getSlice(collectionName, slice).getReplicas().size();
 
       // we need to look at every node and see how many cores it serves
       // add our new cores to existing nodes serving the least number of cores
@@ -501,10 +485,10 @@
         String sliceName = subSlices.get(i - 1);
         for (int j = 2; j <= repFactor; j++) {
           String subShardNodeName = nodeList.get((repFactor * (i - 1) + (j - 2)) % nodeList.size());
-          String shardName = collection + "_" + sliceName + "_replica" + (j);
+          String shardName = collectionName + "_" + sliceName + "_replica" + (j);
 
           log.info("Creating replica shard " + shardName + " as part of slice "
-              + sliceName + " of collection " + collection + " on "
+              + sliceName + " of collection " + collectionName + " on "
               + subShardNodeName);
 
           // Need to create new params for each request
@@ -512,7 +496,7 @@
           params.set(CoreAdminParams.ACTION, CoreAdminAction.CREATE.toString());
 
           params.set(CoreAdminParams.NAME, shardName);
-          params.set(CoreAdminParams.COLLECTION, collection);
+          params.set(CoreAdminParams.COLLECTION, collectionName);
           params.set(CoreAdminParams.SHARD, sliceName);
           // TODO:  Figure the config used by the parent shard and use it.
           //params.set("collection.configName", configName);
@@ -535,12 +519,9 @@
         }
       }
 
-      do {
-        srsp = shardHandler.takeCompletedOrError();
-        if (srsp != null) {
-          processResponse(results, srsp);
-        }
-      } while (srsp != null);
+      collectShardResponses(results, true,
+          "SPLTSHARD failed to create subshard replicas or timed out waiting for them to come up");
+
       log.info("Successfully created all replica shards for all sub-slices "
           + subSlices);
 
@@ -552,7 +533,7 @@
       for (String subSlice : subSlices) {
         propMap.put(subSlice, Slice.ACTIVE);
       }
-      propMap.put(ZkStateReader.COLLECTION_PROP, collection);
+      propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
       ZkNodeProps m = new ZkNodeProps(propMap);
       inQueue.offer(ZkStateReader.toJSON(m));
 
@@ -560,11 +541,24 @@
     } catch (SolrException e) {
       throw e;
     } catch (Exception e) {
-      log.error("Error executing split operation for collection: " + collection + " parent shard: " + slice, e);
+      log.error("Error executing split operation for collection: " + collectionName + " parent shard: " + slice, e);
       throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
     }
   }
 
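+  /**
+   * Drains all completed shard responses from the shard handler and records
+   * them in results. If abortOnError is true, the first response carrying an
+   * exception aborts the operation by rethrowing it as a SolrException with
+   * msgOnError as the message.
+   */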
+  private void collectShardResponses(NamedList results, boolean abortOnError, String msgOnError) {
+    ShardResponse srsp;
+    do {
+      srsp = shardHandler.takeCompletedOrError();
+      if (srsp != null) {
+        processResponse(results, srsp);
+        if (abortOnError && srsp.getException() != null)  {
+          throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError, srsp.getException());
+        }
+      }
+    } while (srsp != null);
+  }
+
   private void sendShardRequest(String nodeName, ModifiableSolrParams params) {
     ShardRequest sreq = new ShardRequest();
     params.set("qt", adminPath);
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 922b486..496da73 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -1251,7 +1251,7 @@
         byte[] data = zkClient.getData(zkPath + "/" + file, null, null, true);
         dir.mkdirs(); 
         log.info("Write file " + new File(dir, file));
-        FileUtils.writeStringToFile(new File(dir, file), new String(data, "UTF-8"), "UTF-8");
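+        // write the raw bytes: avoids a UTF-8 decode/encode round trip and keeps non-text files intact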
+        FileUtils.writeByteArrayToFile(new File(dir, file), data);
       } else {
         downloadFromZK(zkClient, zkPath + "/" + file, new File(dir, file));
       }
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
index 7a0b857..1497a70 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
@@ -178,16 +178,19 @@
           log.error(msg);
         }
       }
-      
-      if (dataDir != null) {
-        if (!dirs.containsKey(dataDir)) {
-          dirs.put(dataDir, name);
+
+      String instDir = DOMUtil.getAttr(node, CoreDescriptor.CORE_INSTDIR, null);
+      if (dataDir != null && instDir != null) { // this won't load anyway if instDir not specified.
+
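+        // resolve dataDir against the instance dir and canonicalize, so the same
+        // data dir written two different ways is still caught as a duplicate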
+        String absData = new File(instDir, dataDir).getCanonicalPath();
+        if (!dirs.containsKey(absData)) {
+          dirs.put(absData, name);
         } else {
           String msg = String
               .format(
                   Locale.ROOT,
                   "More than one core points to data dir %s. They are in %s and %s",
-                  dataDir, dirs.get(dataDir), name);
+                  absData, dirs.get(absData), name);
           log.warn(msg);
         }
       }
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 8733e42..89a74cb 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -270,7 +270,7 @@
     if (libDir != null) {
       File f = FileUtils.resolvePath(new File(dir), libDir);
       log.info("loading shared library: " + f.getAbsolutePath());
-      loader.addToClassLoader(libDir);
+      loader.addToClassLoader(libDir, null, false);
       loader.reloadLuceneSPI();
     }
 
@@ -289,6 +289,7 @@
       adminPath = cfg.get(ConfigSolr.CfgProp.SOLR_ADMINPATH, "/admin/cores");
     } else {
       adminPath = "/admin/cores";
+      defaultCoreName = DEFAULT_DEFAULT_CORE_NAME;
     }
     zkHost = cfg.get(ConfigSolr.CfgProp.SOLR_ZKHOST, null);
     coreLoadThreads = cfg.getInt(ConfigSolr.CfgProp.SOLR_CORELOADTHREADS, CORE_LOAD_THREADS);
@@ -580,6 +581,7 @@
       zkSys.close();
 
     }
+    org.apache.lucene.util.IOUtils.closeWhileHandlingException(loader); // best effort
   }
 
   public void cancelCoreRecoveries() {
@@ -1113,12 +1115,12 @@
       coresAttribs.put("defaultCoreName", defaultCoreName);
     }
 
-    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTPORT, "hostPort",zkSys.getHostPort(), ZkContainer.DEFAULT_HOST_PORT);
+    addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTPORT, "hostPort",zkSys.getHostPort(), null);
     addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_ZKCLIENTTIMEOUT, "zkClientTimeout",
         intToString(this.zkClientTimeout),
         Integer.toString(DEFAULT_ZK_CLIENT_TIMEOUT));
     addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_HOSTCONTEXT, "hostContext",
-        zkSys.getHostContext(), ZkContainer.DEFAULT_HOST_CONTEXT);
+        zkSys.getHostContext(), null);
     addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_LEADERVOTEWAIT, "leaderVoteWait",
         zkSys.getLeaderVoteWait(), LEADER_VOTE_WAIT);
     addCoresAttrib(coresAttribs, ConfigSolr.CfgProp.SOLR_CORELOADTHREADS, "coreLoadThreads",
diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
index dc42278..a44abe7 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
@@ -343,22 +343,4 @@
   public void putProperty(String prop, String val) {
     coreProperties.put(prop, val);
   }
-
-  // This is particularly useful for checking if any two cores have the same
-  // data dir.
-  public String getAbsoluteDataDir() {
-    String dataDir = getDataDir();
-    if (dataDir == null) return null; // No worse than before.
-
-    if (new File(dataDir).isAbsolute()) {
-      return SolrResourceLoader.normalizeDir(
-          SolrResourceLoader.normalizeDir(dataDir));
-    }
-
-    if (coreContainer == null) return null;
-
-    return SolrResourceLoader.normalizeDir(coreContainer.getSolrHome() +
-        SolrResourceLoader.normalizeDir(dataDir));
-
-  }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
index e609844..fd887b9 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -19,9 +19,9 @@
 
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.IndexSchemaFactory;
 import org.apache.solr.util.DOMUtil;
+import org.apache.solr.util.FileUtils;
 import org.apache.solr.util.RegexFileFilter;
 import org.apache.solr.handler.component.SearchComponent;
 import org.apache.solr.request.SolrRequestHandler;
@@ -51,6 +51,7 @@
 import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.xpath.XPathConstants;
 
+import java.io.File;
 import java.util.*;
 import java.util.regex.Pattern;
 import java.util.regex.Matcher;
@@ -453,6 +454,7 @@
     if (nodes == null || nodes.getLength() == 0) return;
     
     log.info("Adding specified lib dirs to ClassLoader");
+    SolrResourceLoader loader = getResourceLoader();
     
     try {
       for (int i = 0; i < nodes.getLength(); i++) {
@@ -464,16 +466,22 @@
           // :TODO: add support for a simpler 'glob' mutually exclusive of regex
           String regex = DOMUtil.getAttr(node, "regex");
           FileFilter filter = (null == regex) ? null : new RegexFileFilter(regex);
-          getResourceLoader().addToClassLoader(baseDir, filter, false);
+          loader.addToClassLoader(baseDir, filter, false);
         } else if (null != path) {
-          getResourceLoader().addToClassLoader(path);
+          final File file = FileUtils.resolvePath(new File(loader.getInstanceDir()), path);
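+          // emulate the removed single-path addToClassLoader: add the parent dir, filtered to exactly this file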
+          loader.addToClassLoader(file.getParent(), new FileFilter() {
+            @Override
+            public boolean accept(File pathname) {
+              return pathname.equals(file);
+            }
+          }, false);
         } else {
           throw new RuntimeException(
               "lib: missing mandatory attributes: 'dir' or 'path'");
         }
       }
     } finally {
-      getResourceLoader().reloadLuceneSPI();
+      loader.reloadLuceneSPI();
     }
   }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCoreDiscoverer.java b/solr/core/src/java/org/apache/solr/core/SolrCoreDiscoverer.java
index 4f7b3ab..8ea16ee 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCoreDiscoverer.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCoreDiscoverer.java
@@ -9,6 +9,7 @@
 import java.util.Properties;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.util.PropertiesUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,6 +90,11 @@
       props.setProperty(CoreDescriptor.CORE_NAME, childFile.getName());
     }
     CoreDescriptor desc = new CoreDescriptor(container, props);
+    CoreDescriptor check = coreDescriptorMap.get(desc.getName());
+    if (check != null) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Core " + desc.getName() +
+          " defined more than once, once in " + desc.getInstanceDir() + " and once in " + check.getInstanceDir());
+    }
     coreDescriptorMap.put(desc.getName(), desc);
   }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
index c10664f..a6d9b2b 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
@@ -17,6 +17,7 @@
 
 package org.apache.solr.core;
 
+import java.io.Closeable;
 import java.io.File;
 import java.io.FileFilter;
 import java.io.FileInputStream;
@@ -37,6 +38,7 @@
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.analysis.util.WordlistLoader;
 import org.apache.solr.common.ResourceLoader;
 import org.apache.solr.handler.admin.CoreAdminHandler;
@@ -68,7 +70,7 @@
 /**
  * @since solr 1.3
  */ 
-public class SolrResourceLoader implements ResourceLoader
+public class SolrResourceLoader implements ResourceLoader,Closeable
 {
   public static final Logger log = LoggerFactory.getLogger(SolrResourceLoader.class);
 
@@ -152,9 +154,11 @@
     File base = FileUtils.resolvePath(new File(getInstanceDir()), baseDir);
     if (base != null && base.exists() && base.isDirectory()) {
       File[] files = base.listFiles(filter);
-      if (!quiet && (files == null || files.length == 0)) {
-        log.warn("No files added to classloader from lib: "
-            + baseDir + " (resolved as: " + base.getAbsolutePath() + ").");
+      if (files == null || files.length == 0) {
+        if (!quiet) {
+          log.warn("No files added to classloader from lib: "
+                   + baseDir + " (resolved as: " + base.getAbsolutePath() + ").");
+        }
       } else {
         this.classLoader = replaceClassLoader(classLoader, base, filter);
       }
@@ -165,35 +169,10 @@
       }
     }
   }
-
-  /**
-   * Adds the specific file/dir specified to the ClassLoader used by this
-   * ResourceLoader.  This method <b>MUST</b>
-   * only be called prior to using this ResourceLoader to get any resources, otherwise
-   * it's behavior will be non-deterministic. You also have to {link #reloadLuceneSPI()}
-   * before using this ResourceLoader.
-   *
-   * @param path A jar file (or directory of classes) to be added to the classpath,
-   *             will be resolved relative the instance dir.
-   */
-  void addToClassLoader(final String path) {
-    final File file = FileUtils.resolvePath(new File(getInstanceDir()), path);
-    if (file.canRead()) {
-      this.classLoader = replaceClassLoader(classLoader, file.getParentFile(),
-                                            new FileFilter() {
-                                              @Override
-                                              public boolean accept(File pathname) {
-                                                return pathname.equals(file);
-                                              }
-                                            });
-    } else {
-      log.error("Can't find (or read) file to add to classloader: " + file);
-    }
-  }
   
   /**
    * Reloads all Lucene SPI implementations using the new classloader.
-   * This method must be called after {@link #addToClassLoader(String)}
+   * This method must be called after {@link #addToClassLoader(String, FileFilter, boolean)}
    * and {@link #addToClassLoader(String,FileFilter,boolean)} before using
    * this ResourceLoader.
    */
@@ -229,7 +208,9 @@
           SolrException.log(log, "Can't add element to classloader: " + files[j], e);
         }
       }
-      return URLClassLoader.newInstance(elements, oldLoader.getParent());
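+      // URLClassLoader implements Closeable since Java 7; close the old loader so its open jars are released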
+      ClassLoader oldParent = oldLoader.getParent();
+      IOUtils.closeWhileHandlingException(oldLoader); // best effort
+      return URLClassLoader.newInstance(elements, oldParent);
     }
     // are we still here?
     return oldLoader;
@@ -778,4 +759,9 @@
     }
     throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, builder.toString() );
   }
+
+  @Override
+  public void close() throws IOException {
+    IOUtils.close(classLoader);
+  }
 }
diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
index 57580c7..c9ed9b3 100644
--- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
@@ -43,11 +43,6 @@
 public class ZkContainer {
   protected static Logger log = LoggerFactory.getLogger(ZkContainer.class);
   
-  /** @deprecated will be remove in Solr 5.0 (SOLR-4622) */
-  public static final String DEFAULT_HOST_CONTEXT = "solr";
-  /** @deprecated will be remove in Solr 5.0 (SOLR-4622) */
-  public static final String DEFAULT_HOST_PORT = "8983";
-  
   protected ZkController zkController;
   private SolrZkServer zkServer;
   private int zkClientTimeout;
@@ -119,21 +114,14 @@
     if (zkRun == null && zookeeperHost == null)
         return;  // not in zk mode
 
-
-    // BEGIN: SOLR-4622: deprecated hardcoded defaults for hostPort & hostContext
     if (null == hostPort) {
-      // throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-      //               "'hostPort' must be configured to run SolrCloud");
-      log.warn("Solr 'hostPort' has not be explicitly configured, using hardcoded default of " + DEFAULT_HOST_PORT + ".  This default has been deprecated and will be removed in future versions of Solr, please configure this value explicitly");
-      hostPort = DEFAULT_HOST_PORT;
+      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
+                   "'hostPort' must be configured to run SolrCloud");
     }
     if (null == hostContext) {
-      // throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
-      //               "'hostContext' must be configured to run SolrCloud");
-      log.warn("Solr 'hostContext' has not be explicitly configured, using hardcoded default of " + DEFAULT_HOST_CONTEXT + ".  This default has been deprecated and will be removed in future versions of Solr, please configure this value explicitly");
-      hostContext = DEFAULT_HOST_CONTEXT;
+      throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
+                   "'hostContext' must be configured to run SolrCloud");
     }
-    // END: SOLR-4622
 
     // zookeeper in quorum mode currently causes a failure when trying to
     // register log4j mbeans.  See SOLR-2369
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
index 12336ed..f089141 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
@@ -252,6 +252,7 @@
       List<String> paths = null;
       int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
 
+      DocRouter router = null;
       if (coreContainer.isZooKeeperAware()) {
         ClusterState clusterState = coreContainer.getZkController().getClusterState();
         String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
@@ -259,8 +260,8 @@
         String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
         Slice slice = clusterState.getSlice(collectionName, sliceName);
         DocRouter.Range currentRange = slice.getRange();
-        DocRouter hp = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
-        ranges = currentRange != null ? hp.partitionRange(partitions, currentRange) : null;
+        router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
+        ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
       }
 
       if (pathsArr == null) {
@@ -278,7 +279,7 @@
       }
 
 
-      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges);
+      SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router);
       core.getUpdateHandler().split(cmd);
 
       // After the split has completed, someone (here?) should start the process of replaying the buffered updates.
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
index c40a8ca..c971c85 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
@@ -191,9 +191,7 @@
       ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
       params.set(CommonParams.WT, "raw");
       req.setParams(params);
-      
-      ContentStreamBase content = new ContentStreamBase.StringStream(
-          new String(zkClient.getData(adminFile, null, null, true), "UTF-8"));
+      ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
       content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
       
       rsp.add(RawResponseWriter.CONTENT, content);
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index 37f8c0b..ad0422d 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -432,11 +432,25 @@
       final XPath xpath = schemaConf.getXPath();
       String expression = stepsToPath(SCHEMA, AT + NAME);
       Node nd = (Node) xpath.evaluate(expression, document, XPathConstants.NODE);
+      StringBuilder sb = new StringBuilder();
+      // Another case where the initialization from the test harness is different from the "real world"
+      sb.append("[");
+      if (loader.getCoreProperties() != null) {
+        sb.append(loader.getCoreProperties().getProperty(NAME));
+      } else {
+        sb.append("null");
+      }
+      sb.append("] ");
       if (nd==null) {
-        log.warn("schema has no name!");
+        sb.append("schema has no name!");
+        log.warn(sb.toString());
       } else {
         name = nd.getNodeValue();
-        log.info("Schema " + NAME + "=" + name);
+        sb.append("Schema ");
+        sb.append(NAME);
+        sb.append("=");
+        sb.append(name);
+        log.info(sb.toString());
       }
 
       //                      /schema/@version
diff --git a/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java b/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java
new file mode 100644
index 0000000..f2ba28d
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.Query;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.request.SolrQueryRequest;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+/**
+ * @see MaxScoreQParserPlugin
+ */
+public class MaxScoreQParser extends LuceneQParser {
+  float tie = 0.0f;
+
+  public MaxScoreQParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
+    super(qstr, localParams, params, req);
+    if (getParam("tie") != null) {
+      tie = Float.parseFloat(getParam("tie"));
+    }
+  }
+
+  /**
+   * Parses the query exactly like the Lucene parser does, but
+   * delegates all SHOULD clauses to a DisjunctionMaxQuery, meaning
+   * that only the clause with the max score will contribute to the
+   * overall score, unless the tie parameter is specified.
+   * <br/>
+   * The max() is only calculated from the SHOULD clauses.
+   * Any MUST clauses will be passed through as separate
+   * BooleanClauses and thus always contribute to the score.
+   * @return the resulting Query
+   * @throws org.apache.solr.search.SyntaxError if parsing fails
+   */
+  @Override
+  public Query parse() throws SyntaxError {
+    Query q = super.parse();
+    if (!(q instanceof BooleanQuery)) {
+      return q;
+    }
+    BooleanQuery obq = (BooleanQuery)q;
+    Collection<Query> should = new ArrayList<Query>();
+    Collection<BooleanClause> prohibOrReq = new ArrayList<BooleanClause>();
+    BooleanQuery newq = new BooleanQuery();
+
+    for (BooleanClause clause : obq.getClauses()) {
+      if(clause.isProhibited() || clause.isRequired()) {
+        prohibOrReq.add(clause);
+      } else {
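+        // wrap each SHOULD clause in its own BooleanQuery so it becomes a separate disjunct of the DisjunctionMaxQuery below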
+        BooleanQuery bq = new BooleanQuery();
+        bq.add(clause);
+        should.add(bq);
+      }
+    }
+    if (should.size() > 0) {
+      DisjunctionMaxQuery dmq = new DisjunctionMaxQuery(should, tie);
+      newq.add(dmq, BooleanClause.Occur.SHOULD);
+    }
+    for(BooleanClause c : prohibOrReq) {
+      newq.add(c);
+    }
+    return newq;
+  }
+}
\ No newline at end of file
diff --git a/solr/core/src/java/org/apache/solr/search/MaxScoreQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/MaxScoreQParserPlugin.java
new file mode 100644
index 0000000..3c028d6
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/MaxScoreQParserPlugin.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.request.SolrQueryRequest;
+
+/**
+ * Parses a query like Lucene query parser, but scoring with max score, not sum
+ * <br>Accepts the "tie" request parameter as with dismax. 0.0=max, 1.0=sum
+ * <br>All other parameters are as with Lucene parser
+ * <br>Example: <code>q=foo {!maxscore v=$myq}&myq=A OR B OR C</code>
+ */
+public class MaxScoreQParserPlugin extends LuceneQParserPlugin {
+  public static String NAME = "maxscore";
+
+  @Override
+  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
+    return new MaxScoreQParser(qstr, localParams, params, req);
+  }
+}
+
diff --git a/solr/core/src/java/org/apache/solr/search/QParserPlugin.java b/solr/core/src/java/org/apache/solr/search/QParserPlugin.java
index d7dd6dd..8758ad8 100755
--- a/solr/core/src/java/org/apache/solr/search/QParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/QParserPlugin.java
@@ -43,6 +43,7 @@
     JoinQParserPlugin.NAME, JoinQParserPlugin.class,
     SurroundQParserPlugin.NAME, SurroundQParserPlugin.class,
     SwitchQParserPlugin.NAME, SwitchQParserPlugin.class,
+    MaxScoreQParserPlugin.NAME, MaxScoreQParserPlugin.class
   };
 
   /** return a {@link QParser} */
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index d526d3d..145f9a6 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -32,9 +32,11 @@
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.OpenBitSet;
 import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.HashBasedRouter;
 import org.apache.solr.common.util.Hash;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.schema.SchemaField;
+import org.apache.solr.schema.StrField;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.util.RefCounted;
 import org.slf4j.Logger;
@@ -53,6 +55,8 @@
   DocRouter.Range[] rangesArr; // same as ranges list, but an array for extra speed in inner loops
   List<String> paths;
   List<SolrCore> cores;
+  DocRouter router;
+  HashBasedRouter hashRouter;
   int numPieces;
   int currPartition = 0;
 
@@ -62,6 +66,9 @@
     ranges = cmd.ranges;
     paths = cmd.paths;
     cores = cmd.cores;
+    router = cmd.router;
+    hashRouter = router instanceof HashBasedRouter ? (HashBasedRouter)router : null;
+
     if (ranges == null) {
       numPieces =  paths != null ? paths.size() : cores.size();
     } else  {
@@ -151,16 +158,24 @@
     BytesRef term = null;
     DocsEnum docsEnum = null;
 
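+    // reuse a single CharsRef across terms to avoid allocating a new buffer per term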
+    CharsRef idRef = new CharsRef(100);
     for (;;) {
       term = termsEnum.next();
       if (term == null) break;
 
       // figure out the hash for the term
-      // TODO: hook in custom hashes (or store hashes)
-      // TODO: performance implications of using indexedToReadable?
-      CharsRef ref = new CharsRef(term.length);
-      ref = field.getType().indexedToReadable(term, ref);
-      int hash = Hash.murmurhash3_x86_32(ref, ref.offset, ref.length, 0);
+
+      // FUTURE: if conversion to strings costs too much, we could
+      // specialize and use the hash function that can work over bytes.
+      idRef = field.getType().indexedToReadable(term, idRef);
+      String idString = idRef.toString();
+
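+      // with a non-hash-based router the hash stays 0, so all docs fall into whichever range includes 0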
+      int hash = 0;
+      if (hashRouter != null) {
+        hash = hashRouter.sliceHash(idString, null, null);
+      }
+      // int hash = Hash.murmurhash3_x86_32(ref, ref.offset, ref.length, 0);
+
       docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
       for (;;) {
         int doc = docsEnum.nextDoc();
diff --git a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
index bc001d7..cf69cbf 100644
--- a/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
+++ b/solr/core/src/java/org/apache/solr/update/SplitIndexCommand.java
@@ -34,13 +34,14 @@
   public List<String> paths;
   public List<SolrCore> cores;  // either paths or cores should be specified
   public List<DocRouter.Range> ranges;
-  // TODO: allow specification of custom hash function
+  public DocRouter router;
 
-  public SplitIndexCommand(SolrQueryRequest req, List<String> paths,  List<SolrCore> cores, List<DocRouter.Range> ranges) {
+  public SplitIndexCommand(SolrQueryRequest req, List<String> paths,  List<SolrCore> cores, List<DocRouter.Range> ranges, DocRouter router) {
     super(req);
     this.paths = paths;
     this.cores = cores;
     this.ranges = ranges;
+    this.router = router;
   }
 
   @Override
@@ -54,6 +55,7 @@
     sb.append(",paths=" + paths);
     sb.append(",cores=" + cores);
     sb.append(",ranges=" + ranges);
+    sb.append(",router=" + router);
     sb.append('}');
     return sb.toString();
   }
diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
index 80be09e..f940489 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@@ -322,7 +322,11 @@
     boolean localIsLeader = cloudDescriptor.isLeader();
     if (DistribPhase.FROMLEADER == phase && localIsLeader && from != null) { // from will be null on log replay
       String fromShard = req.getParams().get("distrib.from.parent");
-      if (fromShard != null)  {
+      if (fromShard != null) {
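+        // updates forwarded from the parent shard leader are only legal while this sub-shard is still being built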
+        if (!Slice.CONSTRUCTION.equals(mySlice.getState()))  {
+          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
+              "Request says it is coming from parent shard leader but we are not in construction state");
+        }
         // shard splitting case -- check ranges to see if we are a sub-shard
         Slice fromSlice = zkController.getClusterState().getCollection(collection).getSlice(fromShard);
         DocRouter.Range parentRange = fromSlice.getRange();
@@ -331,12 +335,12 @@
           throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
               "Request says it is coming from parent shard leader but parent hash range is not superset of my range");
         }
-      } else  {
-      log.error("Request says it is coming from leader, but we are the leader: " + req.getParamString());
-      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Request says it is coming from leader, but we are the leader");
+      } else {
+        log.error("Request says it is coming from leader, but we are the leader: " + req.getParamString());
+        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Request says it is coming from leader, but we are the leader");
+      }
     }
-    }
-    
+
     if (isLeader && !localIsLeader) {
       log.error("ClusterState says we are the leader, but locally we don't think so");
       throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "ClusterState says we are the leader, but locally we don't think so");
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
index 9347d76..1db6ddd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyShardSplitTest.java
@@ -63,7 +63,7 @@
     waitForThingsToLevelOut(15);
 
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
     Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
     DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
     final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
@@ -78,16 +78,17 @@
     try {
       del("*:*");
       for (int id = 0; id < 100; id++) {
-        indexAndUpdateCount(ranges, docCounts, id);
+        indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
       }
       commit();
 
       indexThread = new Thread() {
         @Override
         public void run() {
-          for (int id = 101; id < atLeast(401); id++) {
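+          // atLeast() returns a fresh random value on each call; compute the bound once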
+          int max = atLeast(401);
+          for (int id = 101; id < max; id++) {
             try {
-              indexAndUpdateCount(ranges, docCounts, id);
+              indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
               Thread.sleep(atLeast(25));
             } catch (Exception e) {
               log.error("Exception while adding doc", e);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index 35e4e58..a999ce6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -28,13 +28,15 @@
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.CompositeIdRouter;
 import org.apache.solr.common.cloud.DocRouter;
+import org.apache.solr.common.cloud.HashBasedRouter;
+import org.apache.solr.common.cloud.PlainIdRouter;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.Hash;
 import org.apache.solr.handler.admin.CollectionsHandler;
 import org.apache.solr.update.DirectUpdateHandler2;
 import org.apache.zookeeper.KeeperException;
@@ -52,6 +54,10 @@
   public static final String SHARD1_0 = SHARD1 + "_0";
   public static final String SHARD1_1 = SHARD1 + "_1";
 
+  public ShardSplitTest() {
+    schemaString = "schema15.xml";      // we need a string id
+  }
+
   @Override
   @Before
   public void setUp() throws Exception {
@@ -94,7 +100,7 @@
     waitForThingsToLevelOut(15);
 
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
+    final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
     Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
     DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
     final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
@@ -102,17 +108,19 @@
     int numReplicas = shard1.getReplicas().size();
 
     del("*:*");
-    for (int id = 0; id < 100; id++) {
-      indexAndUpdateCount(ranges, docCounts, id);
+    for (int id = 0; id <= 100; id++) {
+      String shardKey = "" + (char)('a' + (id % 26)); // See comment in ShardRoutingTest for hash distribution
+      indexAndUpdateCount(router, ranges, docCounts, shardKey + "!" + String.valueOf(id));
     }
     commit();
 
     Thread indexThread = new Thread() {
       @Override
       public void run() {
-        for (int id = 101; id < atLeast(401); id++) {
+        int max = atLeast(401);
+        for (int id = 101; id < max; id++) {
           try {
-            indexAndUpdateCount(ranges, docCounts, id);
+            indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id));
             Thread.sleep(atLeast(25));
           } catch (Exception e) {
             log.error("Exception while adding doc", e);
@@ -200,12 +208,14 @@
     baseServer.request(request);
   }
 
-  protected void indexAndUpdateCount(List<DocRouter.Range> ranges, int[] docCounts, int id) throws Exception {
-    indexr("id", id);
+  protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
+    index("id", id);
 
-    // todo - hook in custom hashing
-    byte[] bytes = String.valueOf(id).getBytes("UTF-8");
-    int hash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
+    int hash = 0;
+    if (router instanceof HashBasedRouter) {
+      HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
+      hash = hashBasedRouter.sliceHash(id, null, null);
+    }
     for (int i = 0; i < ranges.size(); i++) {
       DocRouter.Range range = ranges.get(i);
       if (range.includes(hash))
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
index 70c4ad4..4de809d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
@@ -18,6 +18,7 @@
  */
 
 import java.io.File;
+import java.util.Collection;
 import java.util.List;
 
 import org.apache.commons.io.FileUtils;
@@ -186,6 +187,21 @@
     List<String> zkFiles = zkClient.getChildren(ZkController.CONFIGS_ZKNODE + "/" + confsetname, null, true);
     assertEquals(files.length, zkFiles.size());
     
+    File sourceConfDir = new File(ExternalPaths.EXAMPLE_HOME + File.separator + "collection1"
+            + File.separator + "conf");
+    Collection<File> sourceFiles = FileUtils.listFiles(sourceConfDir,null,true);
+    for (File sourceFile :sourceFiles){
+      if (!sourceFile.isHidden()){
+        int indexOfRelativePath = sourceFile.getAbsolutePath().lastIndexOf("collection1/conf");
+        String relativePathofFile = sourceFile.getAbsolutePath().substring(indexOfRelativePath + 17, sourceFile.getAbsolutePath().length());
+        File downloadedFile = new File(confDir,relativePathofFile);
+        assertTrue(downloadedFile.getAbsolutePath() + " does not exist source:" + sourceFile.getAbsolutePath(), downloadedFile.exists());
+        assertTrue("Content didn't change",FileUtils.contentEquals(sourceFile,downloadedFile));
+      }
+      
+    }
+    
+   
     // test reset zk
     args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
         "clear", "/"};
diff --git a/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java b/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java
index 05d490a..d5966b6 100644
--- a/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java
+++ b/solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.core.KeywordTokenizerFactory;
 import org.apache.lucene.analysis.ngram.NGramFilterFactory;
+import org.apache.lucene.util._TestUtil;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.handler.admin.LukeRequestHandler;
 import org.apache.solr.handler.component.FacetComponent;
@@ -30,11 +31,15 @@
 import org.apache.solr.util.plugin.SolrCoreAware;
 
 import java.io.File;
+import java.io.FileFilter;
+import java.io.FileOutputStream;
 import java.io.InputStream;
 import java.nio.charset.CharacterCodingException;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
 
 public class ResourceLoaderTest extends LuceneTestCase 
 {
@@ -131,4 +136,51 @@
       assertTrue(expected.getCause() instanceof CharacterCodingException);
     }
   }
+
+  public void testClassLoaderLibs() throws Exception {
+    File tmpRoot = _TestUtil.getTempDir("testClassLoaderLibs");
+
+    File lib = new File(tmpRoot, "lib");
+    lib.mkdirs();
+
+    JarOutputStream jar1 = new JarOutputStream(new FileOutputStream(new File(lib, "jar1.jar")));
+    jar1.putNextEntry(new JarEntry("aLibFile"));
+    jar1.closeEntry();
+    jar1.close();
+
+    File otherLib = new File(tmpRoot, "otherLib");
+    otherLib.mkdirs();
+
+    JarOutputStream jar2 = new JarOutputStream(new FileOutputStream(new File(otherLib, "jar2.jar")));
+    jar2.putNextEntry(new JarEntry("explicitFile"));
+    jar2.closeEntry();
+    jar2.close();
+    JarOutputStream jar3 = new JarOutputStream(new FileOutputStream(new File(otherLib, "jar3.jar")));
+    jar3.putNextEntry(new JarEntry("otherFile"));
+    jar3.closeEntry();
+    jar3.close();
+
+    SolrResourceLoader loader = new SolrResourceLoader(tmpRoot.getAbsolutePath());
+
+    // ./lib is accessible by default
+    assertNotNull(loader.getClassLoader().getResource("aLibFile"));
+
+    // file filter works (and doesn't add other files in the same dir)
+    final File explicitFileJar = new File(otherLib, "jar2.jar").getAbsoluteFile();
+    loader.addToClassLoader("otherLib",
+        new FileFilter() {
+          @Override
+          public boolean accept(File pathname) {
+            return pathname.equals(explicitFileJar);
+          }
+        }, false);
+    assertNotNull(loader.getClassLoader().getResource("explicitFile"));
+    assertNull(loader.getClassLoader().getResource("otherFile"));
+
+
+    // null file filter means accept all (making otherFile accessible)
+    loader.addToClassLoader("otherLib", null, false);
+    assertNotNull(loader.getClassLoader().getResource("otherFile"));
+    loader.close();
+  }
 }
diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
index 5fc0dd5..d56f2bf 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
@@ -24,11 +24,14 @@
 import java.io.OutputStreamWriter;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
 
 import javax.xml.parsers.ParserConfigurationException;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util._TestUtil;
 import org.apache.solr.SolrTestCaseJ4;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -334,6 +337,55 @@
       cc.shutdown();
     }
   }
+
+  @Test
+  public void testSharedLib() throws Exception {
+    File tmpRoot = _TestUtil.getTempDir("testSharedLib");
+
+    File lib = new File(tmpRoot, "lib");
+    lib.mkdirs();
+
+    JarOutputStream jar1 = new JarOutputStream(new FileOutputStream(new File(lib, "jar1.jar")));
+    jar1.putNextEntry(new JarEntry("defaultSharedLibFile"));
+    jar1.closeEntry();
+    jar1.close();
+
+    File customLib = new File(tmpRoot, "customLib");
+    customLib.mkdirs();
+
+    JarOutputStream jar2 = new JarOutputStream(new FileOutputStream(new File(customLib, "jar2.jar")));
+    jar2.putNextEntry(new JarEntry("customSharedLibFile"));
+    jar2.closeEntry();
+    jar2.close();
+
+    FileUtils.writeStringToFile(new File(tmpRoot, "default-lib-solr.xml"), "<solr><cores/></solr>", "UTF-8");
+    FileUtils.writeStringToFile(new File(tmpRoot, "explicit-lib-solr.xml"), "<solr sharedLib=\"lib\"><cores/></solr>", "UTF-8");
+    FileUtils.writeStringToFile(new File(tmpRoot, "custom-lib-solr.xml"), "<solr sharedLib=\"customLib\"><cores/></solr>", "UTF-8");
+
+    final CoreContainer cc1 = new CoreContainer(tmpRoot.getAbsolutePath());
+    cc1.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "default-lib-solr.xml"));
+    try {
+      cc1.loader.openResource("defaultSharedLibFile").close();
+    } finally {
+      cc1.shutdown();
+    }
+
+    final CoreContainer cc2 = new CoreContainer(tmpRoot.getAbsolutePath());
+    cc2.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "explicit-lib-solr.xml"));
+    try {
+      cc2.loader.openResource("defaultSharedLibFile").close();
+    } finally {
+      cc2.shutdown();
+    }
+
+    final CoreContainer cc3 = new CoreContainer(tmpRoot.getAbsolutePath());
+    cc3.load(tmpRoot.getAbsolutePath(), new File(tmpRoot, "custom-lib-solr.xml"));
+    try {
+      cc3.loader.openResource("customSharedLibFile").close();
+    } finally {
+      cc3.shutdown();
+    }
+  }
   
   private static final String EMPTY_SOLR_XML ="<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" +
       "<solr persistent=\"false\">\n" +
diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
index c9df520..dd4973a 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCoreDiscovery.java
@@ -24,6 +24,7 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.lucene.util.IOUtils;
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrException;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -88,10 +89,9 @@
 
   }
 
-  private void addCoreWithProps(Properties stockProps) throws Exception {
+  private void addCoreWithProps(String name, Properties stockProps) throws Exception {
 
-    File propFile = new File(solrHomeDirectory,
-        stockProps.getProperty(CoreDescriptor.CORE_NAME) + File.separator + SolrCoreDiscoverer.CORE_PROP_FILE);
+    File propFile = new File(new File(solrHomeDirectory, name), SolrCoreDiscoverer.CORE_PROP_FILE);
     File parent = propFile.getParentFile();
     assertTrue("Failed to mkdirs for " + parent.getAbsolutePath(), parent.mkdirs());
     addCoreWithProps(stockProps, propFile);
@@ -127,16 +127,17 @@
     setMeUp();
 
     // name, isLazy, loadOnStartup
-    addCoreWithProps(makeCorePropFile("core1", false, true, "dataDir=core1"));
-    addCoreWithProps(makeCorePropFile("core2", false, false, "dataDir=core2"));
+    addCoreWithProps("core1", makeCorePropFile("core1", false, true, "dataDir=core1"));
+    addCoreWithProps("core2", makeCorePropFile("core2", false, false, "dataDir=core2"));
 
     // I suspect what we're adding in here is a "configset" rather than a schema or solrconfig.
     //
-    addCoreWithProps(makeCorePropFile("lazy1", true, false, "dataDir=lazy1"));
+    addCoreWithProps("lazy1", makeCorePropFile("lazy1", true, false, "dataDir=lazy1"));
 
     CoreContainer cc = init();
     try {
-      assertNull("defaultCore no longer allowed in solr.xml", cc.getDefaultCoreName());
+      assertEquals(CoreContainer.DEFAULT_DEFAULT_CORE_NAME,
+                   cc.getDefaultCoreName());
 
       TestLazyCores.checkInCores(cc, "core1");
       TestLazyCores.checkNotInCores(cc, "lazy1", "core2", "collection1");
@@ -170,6 +171,30 @@
   }
 
   @Test
+  public void testDuplicateNames() throws Exception {
+    setMeUp();
+
+    // name, isLazy, loadOnStartup
+    addCoreWithProps("core1", makeCorePropFile("core1", false, true));
+    addCoreWithProps("core2", makeCorePropFile("core2", false, false, "name=core1"));
+    CoreContainer cc = null;
+    try {
+      cc = init();
+      fail("Should have thrown exception in testDuplicateNames");
+    } catch (SolrException se) {
+      assertTrue("Should have seen an exception because two cores had the same name",
+          "Core  + desc.getName() + \" defined twice".indexOf(se.getMessage()) != -1);
+      assertTrue("/core1 should have been mentioned in the message", "/core1".indexOf(se.getMessage()) != -1);
+      assertTrue("/core2 should have been mentioned in the message", "/core2".indexOf(se.getMessage()) != -1);
+    } finally {
+      if (cc != null) {
+        cc.shutdown();
+      }
+    }
+  }
+
+
+  @Test
   public void testAlternateCoreDir() throws Exception {
     File alt = new File(TEMP_DIR, "alternateCoreDir");
     if (alt.exists()) FileUtils.deleteDirectory(alt);
@@ -192,7 +217,29 @@
       if (alt.exists()) FileUtils.deleteDirectory(alt);
     }
   }
-
+  @Test
+  public void testNoCoreDir() throws Exception {
+    File noCoreDir = new File(TEMP_DIR, "noCoreDir");
+    if (noCoreDir.exists()) FileUtils.deleteDirectory(noCoreDir);
+    noCoreDir.mkdirs();
+    setMeUp(noCoreDir.getAbsolutePath());
+    addCoreWithProps(makeCorePropFile("core1", false, true),
+        new File(noCoreDir, "core1" + File.separator + SolrCoreDiscoverer.CORE_PROP_FILE));
+    addCoreWithProps(makeCorePropFile("core2", false, false),
+        new File(noCoreDir, "core2" + File.separator + SolrCoreDiscoverer.CORE_PROP_FILE));
+    CoreContainer cc = init();
+    try {
+      SolrCore core1 = cc.getCore("core1");
+      SolrCore core2 = cc.getCore("core2");
+      assertNotNull(core1);
+      assertNotNull(core2);
+      core1.close();
+      core2.close();
+    } finally {
+      cc.shutdown();
+      if (noCoreDir.exists()) FileUtils.deleteDirectory(noCoreDir);
+    }
+  }
   // For testing whether finding a solr.xml overrides looking at solr.properties
   private final static String SOLR_XML = "<solr> " +
       "<int name=\"transientCacheSize\">2</int> " +
diff --git a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
new file mode 100644
index 0000000..b6e1483
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
@@ -0,0 +1,117 @@
+package org.apache.solr.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.*;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.util.AbstractSolrTestCase;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+public class TestMaxScoreQueryParser extends AbstractSolrTestCase {
+  Query q;
+  BooleanClause[] clauses;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig.xml", "schema.xml");
+  }
+
+  @Test
+  public void testFallbackToLucene() {
+    q = parse("foo");
+    assertTrue(q instanceof TermQuery);
+
+    q = parse("price:[0 TO 10]");
+    assertTrue(q instanceof NumericRangeQuery);
+  }
+
+  @Test
+  public void testNoShouldClauses() {
+    q = parse("+foo +bar");
+    clauses = clauses(q);
+    assertEquals(2, clauses.length);
+    assertTrue(clauses[0].isRequired());
+    assertTrue(clauses[1].isRequired());
+
+    q = parse("+foo -bar");
+    clauses = clauses(q);
+    assertEquals(2, clauses.length);
+    assertTrue(clauses[0].isRequired());
+    assertTrue(clauses[1].isProhibited());
+  }
+
+  @Test
+  public void testPureMax() {
+    q = parse("foo bar");
+    clauses = clauses(q);
+    assertEquals(1, clauses.length);
+    assertTrue(clauses[0].getQuery() instanceof DisjunctionMaxQuery);
+    assertEquals(0.0, ((DisjunctionMaxQuery) clauses[0].getQuery()).getTieBreakerMultiplier(), 1e-15);
+    ArrayList<Query> qa = ((DisjunctionMaxQuery) clauses[0].getQuery()).getDisjuncts();
+    assertEquals(2, qa.size());
+    assertEquals("text:foo", qa.get(0).toString());
+  }
+
+  @Test
+  public void testMaxAndProhibited() {
+    q = parse("foo bar -baz");
+    clauses = clauses(q);
+    assertEquals(2, clauses.length);
+    assertTrue(clauses[0].getQuery() instanceof DisjunctionMaxQuery);
+    assertTrue(clauses[1].getQuery() instanceof TermQuery);
+    assertEquals("text:baz", clauses[1].getQuery().toString());
+    assertTrue(clauses[1].isProhibited());
+  }
+
+  @Test
+  public void testTie() {
+    q = parse("foo bar", "tie", "0.5");
+    clauses = clauses(q);
+    assertEquals(1, clauses.length);
+    assertTrue(clauses[0].getQuery() instanceof DisjunctionMaxQuery);
+    assertEquals(0.5, ((DisjunctionMaxQuery) clauses[0].getQuery()).getTieBreakerMultiplier(), 1e-15);
+  }
+
+  //
+  // Helper methods
+  //
+
+  private Query parse(String q, String... params) {
+    try {
+      ModifiableSolrParams p = new ModifiableSolrParams();
+      ArrayList<String> al = new ArrayList<String>(Arrays.asList(params));
+      while(al.size() >= 2) {
+        p.add(al.remove(0), al.remove(0));
+      }
+      return new MaxScoreQParser(q, p, new ModifiableSolrParams(), req(q)).parse();
+    } catch (SyntaxError syntaxError) {
+      fail("Failed with exception "+syntaxError.getMessage());
+    }
+    fail("Parse failed");
+    return null;
+  }
+
+  private BooleanClause[] clauses(Query q) {
+    return ((BooleanQuery) q).getClauses();
+  }
+}
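
Illustrative sketch, not part of the patch: the parser exercised above is reachable from clients through local-params syntax. The SolrJ snippet below is hypothetical — it assumes a server at localhost:8983 and that the plugin is registered under the name "maxscore"; the tie parameter mirrors the testTie() case above.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrServer;

public class MaxScoreQueryDemo {
  public static void main(String[] args) throws Exception {
    HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr");
    // "foo bar" is scored as max(score(foo), score(bar)); tie=0.5 adds half
    // of the non-maximum disjunct scores on top.
    SolrQuery query = new SolrQuery("{!maxscore tie=0.5}foo bar");
    System.out.println(server.query(query).getResults().getNumFound() + " hits");
  }
}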
diff --git a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
index 30a253f..f017ebe 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrIndexSplitterTest.java
@@ -95,7 +95,7 @@
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges);
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter());
       new SolrIndexSplitter(command).split();
 
       Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
@@ -148,7 +148,7 @@
       try {
         request = lrf.makeRequest("q", "dummy");
 
-        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1, core2), ranges);
+        SplitIndexCommand command = new SplitIndexCommand(request, null, Lists.newArrayList(core1, core2), ranges, new PlainIdRouter());
         new SolrIndexSplitter(command).split();
       } finally {
         if (request != null) request.close();
@@ -185,7 +185,7 @@
       request = lrf.makeRequest("q", "dummy");
 
       SplitIndexCommand command = new SplitIndexCommand(request,
-          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()), null, null);
+          Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()), null, null, new PlainIdRouter());
       new SolrIndexSplitter(command).split();
 
       directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
diff --git a/solr/example/cloud-scripts/log4j.properties b/solr/example/cloud-scripts/log4j.properties
new file mode 100644
index 0000000..c581583
--- /dev/null
+++ b/solr/example/cloud-scripts/log4j.properties
@@ -0,0 +1,8 @@
+#  Logging level
+log4j.rootLogger=INFO, stderr
+
+# log to stderr
+log4j.appender.stderr = org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.Target = System.err
+log4j.appender.stderr.layout = org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; %C; %m\n
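
Illustrative sketch, not part of the patch: a quick way to see the pattern above in action, assuming log4j 1.2 is on the classpath and this file is picked up via -Dlog4j.configuration (as the zkcli scripts below arrange). LogPatternDemo is a hypothetical class name.

import org.apache.log4j.Logger;

public class LogPatternDemo {
  public static void main(String[] args) {
    // With the config above this writes something like
    // "INFO  - 2013-05-13 12:00:00.000; LogPatternDemo; starting up" to stderr.
    Logger.getLogger(LogPatternDemo.class).info("starting up");
  }
}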
diff --git a/solr/example/cloud-scripts/zkcli.bat b/solr/example/cloud-scripts/zkcli.bat
index 5857a3c..8232a72 100644
--- a/solr/example/cloud-scripts/zkcli.bat
+++ b/solr/example/cloud-scripts/zkcli.bat
@@ -8,5 +8,4 @@
 set SDIR=%~dp0
 if "%SDIR:~-1%"=="\" set SDIR=%SDIR:~0,-1%
 
-     
-"%JVM%" -classpath "%SDIR%\..\solr-webapp\webapp\WEB-INF\lib\*;%SDIR%\..\lib\ext" org.apache.solr.cloud.ZkCLI %*
+"%JVM%" -Dlog4j.configuration=file:%SDIR%\log4j.properties -classpath "%SDIR%\..\solr-webapp\webapp\WEB-INF\lib\*;%SDIR%\..\lib\ext\*" org.apache.solr.cloud.ZkCLI %*
diff --git a/solr/example/cloud-scripts/zkcli.sh b/solr/example/cloud-scripts/zkcli.sh
index 1a8f477..ab5da96 100644
--- a/solr/example/cloud-scripts/zkcli.sh
+++ b/solr/example/cloud-scripts/zkcli.sh
@@ -9,6 +9,5 @@
 
 sdir="`dirname \"$0\"`"
 
-
-$JVM  -classpath "$sdir/../solr-webapp/webapp/WEB-INF/lib/*:$sdir/../lib/ext/*" org.apache.solr.cloud.ZkCLI ${1+"$@"}
+PATH=$JAVA_HOME/bin:$PATH $JVM -Dlog4j.configuration=file:$sdir/log4j.properties -classpath "$sdir/../solr-webapp/webapp/WEB-INF/lib/*:$sdir/../lib/ext/*" org.apache.solr.cloud.ZkCLI ${1+"$@"}
 
diff --git a/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml b/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
index cb6d395..7dd2f67 100644
--- a/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
+++ b/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
@@ -5,22 +5,22 @@
                 pk="link"
                 url="http://rss.slashdot.org/Slashdot/slashdot"
                 processor="XPathEntityProcessor"
-                forEach="/rss/channel | /rss/item"
+                forEach="/rss/channel/item"
                 transformer="DateFormatTransformer">
 				
             <field column="source" xpath="/rss/channel/title" commonField="true" />
             <field column="source-link" xpath="/rss/channel/link" commonField="true" />
             <field column="subject" xpath="/rss/channel/subject" commonField="true" />
 			
-            <field column="title" xpath="/rss/item/title" />
-            <field column="link" xpath="/rss/item/link" />
-            <field column="description" xpath="/rss/item/description" />
-            <field column="creator" xpath="/rss/item/creator" />
-            <field column="item-subject" xpath="/rss/item/subject" />
-            <field column="date" xpath="/rss/item/date" dateTimeFormat="yyyy-MM-dd'T'HH:mm:ss" />
-            <field column="slash-department" xpath="/rss/item/department" />
-            <field column="slash-section" xpath="/rss/item/section" />
-            <field column="slash-comments" xpath="/rss/item/comments" />
+            <field column="title" xpath="/rss/channel/item/title" />
+            <field column="link" xpath="/rss/channel/item/link" />
+            <field column="description" xpath="/rss/channel/item/description" />
+            <field column="creator" xpath="/rss/channel/item/creator" />
+            <field column="item-subject" xpath="/rss/channel/item/subject" />
+            <field column="date" xpath="/rss/channel/item/date" dateTimeFormat="yyyy-MM-dd'T'HH:mm:ss" />
+            <field column="slash-department" xpath="/rss/channel/item/department" />
+            <field column="slash-section" xpath="/rss/channel/item/section" />
+            <field column="slash-comments" xpath="/rss/channel/item/comments" />
         </entity>
     </document>
 </dataConfig>
diff --git a/solr/licenses/junit4-ant-2.0.10.jar.sha1 b/solr/licenses/junit4-ant-2.0.10.jar.sha1
new file mode 100644
index 0000000..d63d8dd
--- /dev/null
+++ b/solr/licenses/junit4-ant-2.0.10.jar.sha1
@@ -0,0 +1 @@
+ca55927404cf0a1a0e078d988222c4feb9dfc01c
diff --git a/solr/licenses/junit4-ant-2.0.9.jar.sha1 b/solr/licenses/junit4-ant-2.0.9.jar.sha1
deleted file mode 100644
index 8d50518..0000000
--- a/solr/licenses/junit4-ant-2.0.9.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bba707f4b0933f782dd456c262dc36f4bac01f45
diff --git a/solr/licenses/randomizedtesting-runner-2.0.10.jar.sha1 b/solr/licenses/randomizedtesting-runner-2.0.10.jar.sha1
new file mode 100644
index 0000000..fb7355b
--- /dev/null
+++ b/solr/licenses/randomizedtesting-runner-2.0.10.jar.sha1
@@ -0,0 +1 @@
+00befdff5ccc24797b46a68819524f42b570e745
diff --git a/solr/licenses/randomizedtesting-runner-2.0.9.jar.sha1 b/solr/licenses/randomizedtesting-runner-2.0.9.jar.sha1
deleted file mode 100644
index 22d5067..0000000
--- a/solr/licenses/randomizedtesting-runner-2.0.9.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4b8e918d278f56a18a6044660215290995889bfa
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java b/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java
index 8557e9d..a7d4b82 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java
@@ -36,7 +36,7 @@
   private int separator = '!';
 
   // separator used to optionally specify number of bits to allocate toward first part.
-  private int bitsSepartor = '/';
+  private int bitsSeparator = '/';
   private int bits = 16;
   private int mask1 = 0xffff0000;
   private int mask2 = 0x0000ffff;
@@ -59,7 +59,7 @@
   }
 
   @Override
-  protected int sliceHash(String id, SolrInputDocument doc, SolrParams params) {
+  public int sliceHash(String id, SolrInputDocument doc, SolrParams params) {
     int idx = id.indexOf(separator);
     if (idx < 0) {
       return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
@@ -69,7 +69,7 @@
     int m2 = mask2;
 
     String part1 = id.substring(0,idx);
-    int commaIdx = part1.indexOf(bitsSepartor);
+    int commaIdx = part1.indexOf(bitsSeparator);
     if (commaIdx > 0) {
       int firstBits = getBits(part1, commaIdx);
       if (firstBits >= 0) {
@@ -105,7 +105,7 @@
     int m2 = mask2;
 
     String part1 = id.substring(0,idx);
-    int bitsSepIdx = part1.indexOf(bitsSepartor);
+    int bitsSepIdx = part1.indexOf(bitsSeparator);
     if (bitsSepIdx > 0) {
       int firstBits = getBits(part1, bitsSepIdx);
       if (firstBits >= 0) {
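
Illustrative sketch, not part of the patch: the bitsSeparator handled above lets an id such as "tenant/8!doc42" dedicate 8 of the 32 hash bits to the "tenant" part instead of the default 16. The standalone mask arithmetic below (hypothetical class, not the router's actual code) shows the effect:

public class CompositeIdBitsSketch {
  public static void main(String[] args) {
    int bits = 8; // parsed from the "/8" in "tenant/8!doc42"
    int mask1 = bits == 0 ? 0 : 0xffffffff << (32 - bits); // hash bits taken from "tenant"
    int mask2 = bits == 32 ? 0 : 0xffffffff >>> bits;      // hash bits taken from "doc42"
    System.out.printf("mask1=%08x mask2=%08x%n", mask1, mask2); // mask1=ff000000 mask2=00ffffff
  }
}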
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java b/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java
index b77d57e..0addda7 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/HashBasedRouter.java
@@ -42,7 +42,7 @@
     return range != null && range.includes(hash);
   }
 
-  protected int sliceHash(String id, SolrInputDocument sdoc, SolrParams params) {
+  public int sliceHash(String id, SolrInputDocument sdoc, SolrParams params) {
     return Hash.murmurhash3_x86_32(id, 0, id.length(), 0);
   }
 
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java b/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
index 59cb15b..bf69b2c 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
@@ -311,13 +311,13 @@
   
   public void makePath(String path, File file, boolean failOnExists, boolean retryOnConnLoss)
       throws IOException, KeeperException, InterruptedException {
-    makePath(path, FileUtils.readFileToString(file).getBytes("UTF-8"),
+    makePath(path, FileUtils.readFileToByteArray(file),
         CreateMode.PERSISTENT, null, failOnExists, retryOnConnLoss);
   }
   
   public void makePath(String path, File file, boolean retryOnConnLoss) throws IOException,
       KeeperException, InterruptedException {
-    makePath(path, FileUtils.readFileToString(file).getBytes("UTF-8"), retryOnConnLoss);
+    makePath(path, FileUtils.readFileToByteArray(file), retryOnConnLoss);
   }
   
   public void makePath(String path, CreateMode createMode, boolean retryOnConnLoss) throws KeeperException,
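
Illustrative sketch, not part of the patch: the switch to readFileToByteArray matters because ZooKeeper node data is raw bytes — round-tripping a file through a String mangles anything that is not valid text. A hypothetical helper using the byte[] overload shown in the hunk above:

import java.io.File;
import org.apache.commons.io.FileUtils;
import org.apache.solr.common.cloud.SolrZkClient;

public class ZkUploadSketch {
  // Uploads a possibly-binary file to ZooKeeper byte-for-byte.
  static void upload(SolrZkClient zkClient, File f, String zkPath) throws Exception {
    zkClient.makePath(zkPath, FileUtils.readFileToByteArray(f), true);
  }
}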
diff --git a/solr/solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java b/solr/solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
index 877da36..d531b0f 100755
--- a/solr/solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
+++ b/solr/solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
@@ -233,4 +233,27 @@
   public void setSourceInfo(String sourceInfo) {
     this.sourceInfo = sourceInfo;
   }
+  
+  /**
+   * Construct a <code>ContentStream</code> from a <code>byte[]</code>
+   */
+  public static class ByteArrayStream extends ContentStreamBase
+  {
+    private final byte[] bytes;
+    
+    public ByteArrayStream( byte[] bytes, String source ) {
+      this.bytes = bytes; 
+      
+      this.contentType = null;
+      name = source;
+      size = Long.valueOf(bytes.length);
+      sourceInfo = source;
+    }
+
+
+    @Override
+    public InputStream getStream() throws IOException {
+      return new ByteArrayInputStream( bytes );
+    }
+  }  
 }
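
Illustrative sketch, not part of the patch: minimal usage of the new ByteArrayStream, wrapping an in-memory payload so it can be handed to any API that consumes a ContentStream. ByteArrayStreamSketch and the payload contents are hypothetical.

import java.io.InputStream;
import org.apache.solr.common.util.ContentStream;
import org.apache.solr.common.util.ContentStreamBase;

public class ByteArrayStreamSketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "<add><doc/></add>".getBytes("UTF-8");
    ContentStream stream = new ContentStreamBase.ByteArrayStream(payload, "in-memory");
    InputStream in = stream.getStream(); // a ByteArrayInputStream over payload
    System.out.println(in.available() + " bytes, size=" + stream.getSize());
  }
}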
diff --git a/solr/test-framework/ivy.xml b/solr/test-framework/ivy.xml
index a86bd4c..3de0822 100644
--- a/solr/test-framework/ivy.xml
+++ b/solr/test-framework/ivy.xml
@@ -32,8 +32,8 @@
       <dependency org="org.apache.ant" name="ant" rev="1.8.2" transitive="false" />
 
       <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
-      <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.9" transitive="false" conf="default->*;junit4-stdalone->*" />
-      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.9" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.10" transitive="false" conf="default->*;junit4-stdalone->*" />
 
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>
diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
index a596820..2df2f10 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
@@ -183,8 +183,8 @@
         String solrHome = SolrResourceLoader.locateSolrHome();
         container = new CoreContainer(new SolrResourceLoader(solrHome)) {
           {
-            String hostPort = System.getProperty("hostPort");
-            String hostContext = "solr";
+            String hostPort = System.getProperty("hostPort", "8983");
+            String hostContext = System.getProperty("hostContext", "solr");
             defaultCoreName = CoreContainer.DEFAULT_DEFAULT_CORE_NAME;
             initShardHandler();
             zkSys.initZooKeeper(this, solrHome, System.getProperty("zkHost"), 30000, hostPort, hostContext, null, "30000", 30000, 30000);
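
Illustrative sketch, not part of the patch: with the defaults above, a test run can relocate the embedded harness without code changes. HarnessConfigSketch and the values are hypothetical; the properties are normally passed as -D flags on the test JVM.

public class HarnessConfigSketch {
  public static void main(String[] args) {
    // Same effect as -DhostPort=7574 -DhostContext=mysolr on the test JVM,
    // provided these are set before the CoreContainer above is created.
    System.setProperty("hostPort", "7574");
    System.setProperty("hostContext", "mysolr");
  }
}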